-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/s390/Kconfig | 17
-rw-r--r--  arch/s390/appldata/appldata.h | 16
-rw-r--r--  arch/s390/appldata/appldata_base.c | 81
-rw-r--r--  arch/s390/appldata/appldata_os.c | 1
-rw-r--r--  arch/s390/defconfig | 1
-rw-r--r--  arch/s390/hypfs/hypfs.h | 2
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c | 16
-rw-r--r--  arch/s390/hypfs/hypfs_diag.h | 2
-rw-r--r--  arch/s390/hypfs/inode.c | 12
-rw-r--r--  arch/s390/kernel/Makefile | 3
-rw-r--r--  arch/s390/kernel/entry.S | 12
-rw-r--r--  arch/s390/kernel/entry64.S | 16
-rw-r--r--  arch/s390/kernel/head.S | 69
-rw-r--r--  arch/s390/kernel/head31.S | 48
-rw-r--r--  arch/s390/kernel/head64.S | 59
-rw-r--r--  arch/s390/kernel/ipl.c | 942
-rw-r--r--  arch/s390/kernel/kprobes.c | 657
-rw-r--r--  arch/s390/kernel/reipl.S | 33
-rw-r--r--  arch/s390/kernel/reipl64.S | 34
-rw-r--r--  arch/s390/kernel/reipl_diag.c | 39
-rw-r--r--  arch/s390/kernel/s390_ksyms.c | 6
-rw-r--r--  arch/s390/kernel/setup.c | 272
-rw-r--r--  arch/s390/kernel/signal.c | 40
-rw-r--r--  arch/s390/kernel/smp.c | 10
-rw-r--r--  arch/s390/kernel/traps.c | 31
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/s390/lib/Makefile | 4
-rw-r--r--  arch/s390/lib/uaccess.S | 211
-rw-r--r--  arch/s390/lib/uaccess64.S | 207
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c | 156
-rw-r--r--  arch/s390/lib/uaccess_std.c | 340
-rw-r--r--  arch/s390/mm/cmm.c | 30
-rw-r--r--  arch/s390/mm/fault.c | 40
-rw-r--r--  arch/s390/mm/init.c | 36
-rw-r--r--  drivers/base/hypervisor.c | 3
-rw-r--r--  drivers/s390/Kconfig | 30
-rw-r--r--  drivers/s390/block/dasd.c | 8
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 82
-rw-r--r--  drivers/s390/block/dasd_eer.c | 2
-rw-r--r--  drivers/s390/block/dasd_int.h | 1
-rw-r--r--  drivers/s390/block/xpram.c | 2
-rw-r--r--  drivers/s390/char/Makefile | 1
-rw-r--r--  drivers/s390/char/monwriter.c | 292
-rw-r--r--  drivers/s390/char/vmcp.c | 2
-rw-r--r--  drivers/s390/char/vmcp.h | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 5
-rw-r--r--  drivers/s390/cio/cio.c | 95
-rw-r--r--  drivers/s390/cio/css.c | 203
-rw-r--r--  drivers/s390/cio/device.c | 109
-rw-r--r--  drivers/s390/cio/device_fsm.c | 40
-rw-r--r--  drivers/s390/cio/device_ops.c | 17
-rw-r--r--  drivers/s390/cio/device_pgid.c | 81
-rw-r--r--  drivers/s390/cio/qdio.c | 4
-rw-r--r--  drivers/s390/cio/qdio.h | 16
-rw-r--r--  drivers/s390/crypto/Makefile | 15
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 1221
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 158
-rw-r--r--  drivers/s390/crypto/z90common.h | 166
-rw-r--r--  drivers/s390/crypto/z90crypt.h | 71
-rw-r--r--  drivers/s390/crypto/z90hardware.c | 2531
-rw-r--r--  drivers/s390/crypto/z90main.c | 3379
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 1091
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 141
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h | 350
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 435
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.h | 126
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 133
-rw-r--r--  drivers/s390/crypto/zcrypt_mono.c | 100
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c | 418
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.h | 117
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 630
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.h | 176
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 951
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.h | 79
-rw-r--r--  drivers/s390/s390mach.c | 17
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 8
-rw-r--r--  drivers/s390/sysinfo.c | 455
-rw-r--r--  include/asm-s390/Kbuild | 2
-rw-r--r--  include/asm-s390/appldata.h | 90
-rw-r--r--  include/asm-s390/cio.h | 7
-rw-r--r--  include/asm-s390/dma.h | 2
-rw-r--r--  include/asm-s390/futex.h | 87
-rw-r--r--  include/asm-s390/io.h | 2
-rw-r--r--  include/asm-s390/kdebug.h | 59
-rw-r--r--  include/asm-s390/kprobes.h | 114
-rw-r--r--  include/asm-s390/lowcore.h | 14
-rw-r--r--  include/asm-s390/monwriter.h | 33
-rw-r--r--  include/asm-s390/pgalloc.h | 67
-rw-r--r--  include/asm-s390/pgtable.h | 124
-rw-r--r--  include/asm-s390/processor.h | 17
-rw-r--r--  include/asm-s390/setup.h | 66
-rw-r--r--  include/asm-s390/smp.h | 2
-rw-r--r--  include/asm-s390/uaccess.h | 172
-rw-r--r--  include/asm-s390/unistd.h | 170
-rw-r--r--  include/asm-s390/z90crypt.h | 212
-rw-r--r--  include/asm-s390/zcrypt.h | 285
-rw-r--r--  include/linux/mod_devicetable.h | 11
-rw-r--r--  scripts/mod/file2alias.c | 12
99 files changed, 10478 insertions, 8281 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 11b59d2c7cf5..ed2a83cfad7c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2452,6 +2452,8 @@ S: Maintained
 S390
 P:	Martin Schwidefsky
 M:	schwidefsky@de.ibm.com
+P:	Heiko Carstens
+M:	heiko.carstens@de.ibm.com
 M:	linux390@de.ibm.com
 L:	linux-390@vm.marist.edu
 W:	http://www.ibm.com/developerworks/linux/linux390/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2f4f70c4dbb2..b216ca659cdf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -460,8 +460,7 @@ config S390_HYPFS_FS
 	  information in an s390 hypervisor environment.
 
 config KEXEC
-	bool "kexec system call (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	bool "kexec system call"
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
@@ -487,8 +486,22 @@ source "drivers/net/Kconfig"
 
 source "fs/Kconfig"
 
+menu "Instrumentation Support"
+
 source "arch/s390/oprofile/Kconfig"
 
+config KPROBES
+	bool "Kprobes (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && MODULES
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function. register_kprobe() establishes
+	  a probepoint and specifies the callback. Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+
+endmenu
+
 source "arch/s390/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index 71d65eb30650..0429481dea63 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -29,22 +29,6 @@
 #define CTL_APPLDATA_NET_SUM	2125
 #define CTL_APPLDATA_PROC	2126
 
-#ifndef CONFIG_64BIT
-
-#define APPLDATA_START_INTERVAL_REC	0x00	/* Function codes for */
-#define APPLDATA_STOP_REC		0x01	/* DIAG 0xDC */
-#define APPLDATA_GEN_EVENT_RECORD	0x02
-#define APPLDATA_START_CONFIG_REC	0x03
-
-#else
-
-#define APPLDATA_START_INTERVAL_REC	0x80
-#define APPLDATA_STOP_REC		0x81
-#define APPLDATA_GEN_EVENT_RECORD	0x82
-#define APPLDATA_START_CONFIG_REC	0x83
-
-#endif /* CONFIG_64BIT */
-
 #define P_INFO(x...)	printk(KERN_INFO MY_PRINT_NAME " info: " x)
 #define P_ERROR(x...)	printk(KERN_ERR MY_PRINT_NAME " error: " x)
 #define P_WARNING(x...)	printk(KERN_WARNING MY_PRINT_NAME " status: " x)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index a0a94e0ef8d1..b69ed742f981 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -14,20 +14,20 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/smp.h>
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/page-flags.h>
 #include <linux/swap.h>
 #include <linux/pagemap.h>
 #include <linux/sysctl.h>
-#include <asm/timer.h>
-//#include <linux/kernel_stat.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/workqueue.h>
+#include <asm/appldata.h>
+#include <asm/timer.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/smp.h>
 
 #include "appldata.h"
 
@@ -39,34 +39,6 @@
 
 #define TOD_MICRO	0x01000		/* nr. of TOD clock units
 					   for 1 microsecond */
-
-/*
- * Parameter list for DIAGNOSE X'DC'
- */
-#ifndef CONFIG_64BIT
-struct appldata_parameter_list {
-	u16 diag;		/* The DIAGNOSE code X'00DC' */
-	u8  function;		/* The function code for the DIAGNOSE */
-	u8  parlist_length;	/* Length of the parameter list */
-	u32 product_id_addr;	/* Address of the 16-byte product ID */
-	u16 reserved;
-	u16 buffer_length;	/* Length of the application data buffer */
-	u32 buffer_addr;	/* Address of the application data buffer */
-};
-#else
-struct appldata_parameter_list {
-	u16 diag;
-	u8  function;
-	u8  parlist_length;
-	u32 unused01;
-	u16 reserved;
-	u16 buffer_length;
-	u32 unused02;
-	u64 product_id_addr;
-	u64 buffer_addr;
-};
-#endif /* CONFIG_64BIT */
-
 /*
  * /proc entries (sysctl)
  */
@@ -181,46 +153,17 @@ static void appldata_work_fn(void *data)
 int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 		   u16 length, char *mod_lvl)
 {
-	unsigned long ry;
-	struct appldata_product_id {
-		char prod_nr[7];	/* product nr. */
-		char prod_fn[2];	/* product function */
-		char record_nr;		/* record nr. */
-		char version_nr[2];	/* version */
-		char release_nr[2];	/* release */
-		char mod_lvl[2];	/* modification lvl. */
-	} appldata_product_id = {
-		/* all strings are EBCDIC, record_nr is byte */
+	struct appldata_product_id id = {
 		.prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
 			       0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
-		.prod_fn    = {0xD5, 0xD3},		/* "NL" */
+		.prod_fn    = 0xD5D3,			/* "NL" */
 		.record_nr  = record_nr,
-		.version_nr = {0xF2, 0xF6},		/* "26" */
-		.release_nr = {0xF0, 0xF1},		/* "01" */
-		.mod_lvl    = {mod_lvl[0], mod_lvl[1]},
-	};
-	struct appldata_parameter_list appldata_parameter_list = {
-		.diag = 0xDC,
-		.function = function,
-		.parlist_length =
-			sizeof(appldata_parameter_list),
-		.buffer_length = length,
-		.product_id_addr =
-			(unsigned long) &appldata_product_id,
-		.buffer_addr = virt_to_phys((void *) buffer)
+		.version_nr = 0xF2F6,			/* "26" */
+		.release_nr = 0xF0F1,			/* "01" */
+		.mod_lvl    = (mod_lvl[0]) << 8 | mod_lvl[1],
 	};
 
-	if (!MACHINE_IS_VM)
-		return -ENOSYS;
-	ry = -1;
-	asm volatile(
-		"diag %1,%0,0xDC\n\t"
-		: "=d" (ry)
-		: "d" (&appldata_parameter_list),
-		  "m" (appldata_parameter_list),
-		  "m" (appldata_product_id)
-		: "cc");
-	return (int) ry;
+	return appldata_asm(&id, function, (void *) buffer, length);
 }
 /************************ timer, work, DIAG <END> ****************************/
 
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 161acc5c8a1b..76a15523ae9e 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -16,6 +16,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/netdevice.h>
 #include <linux/sched.h>
+#include <asm/appldata.h>
 #include <asm/smp.h>
 
 #include "appldata.h"
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f1d4591eddbb..35da53986b1b 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -428,6 +428,7 @@ CONFIG_S390_TAPE_34XX=m
 # CONFIG_VMLOGRDR is not set
 # CONFIG_VMCP is not set
 # CONFIG_MONREADER is not set
+CONFIG_MONWRITER=m
 
 #
 # Cryptographic devices
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index ea5567be00fc..f3dbd91965c6 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -1,5 +1,5 @@
 /*
- * fs/hypfs/hypfs.h
+ * arch/s390/hypfs/hypfs.h
  * Hypervisor filesystem for Linux on s390.
  *
  * Copyright (C) IBM Corp. 2006
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 1785bce2b919..75144efbb92b 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -1,5 +1,5 @@
 /*
- * fs/hypfs/hypfs_diag.c
+ * arch/s390/hypfs/hypfs_diag.c
  * Hypervisor filesystem for Linux on s390. Diag 204 and 224
  * implementation.
  *
@@ -432,12 +432,14 @@ static int diag204_probe(void)
 
 	buf = diag204_get_buffer(INFO_EXT, &pages);
 	if (!IS_ERR(buf)) {
-		if (diag204(SUBC_STIB7 | INFO_EXT, pages, buf) >= 0) {
+		if (diag204((unsigned long)SUBC_STIB7 |
+			    (unsigned long)INFO_EXT, pages, buf) >= 0) {
 			diag204_store_sc = SUBC_STIB7;
 			diag204_info_type = INFO_EXT;
 			goto out;
 		}
-		if (diag204(SUBC_STIB6 | INFO_EXT, pages, buf) >= 0) {
+		if (diag204((unsigned long)SUBC_STIB6 |
+			    (unsigned long)INFO_EXT, pages, buf) >= 0) {
 			diag204_store_sc = SUBC_STIB7;
 			diag204_info_type = INFO_EXT;
 			goto out;
@@ -452,7 +454,8 @@ static int diag204_probe(void)
 		rc = PTR_ERR(buf);
 		goto fail_alloc;
 	}
-	if (diag204(SUBC_STIB4 | INFO_SIMPLE, pages, buf) >= 0) {
+	if (diag204((unsigned long)SUBC_STIB4 |
+		    (unsigned long)INFO_SIMPLE, pages, buf) >= 0) {
 		diag204_store_sc = SUBC_STIB4;
 		diag204_info_type = INFO_SIMPLE;
 		goto out;
@@ -476,7 +479,8 @@ static void *diag204_store(void)
 	buf = diag204_get_buffer(diag204_info_type, &pages);
 	if (IS_ERR(buf))
 		goto out;
-	if (diag204(diag204_store_sc | diag204_info_type, pages, buf) < 0)
+	if (diag204((unsigned long)diag204_store_sc |
+		    (unsigned long)diag204_info_type, pages, buf) < 0)
 		return ERR_PTR(-ENOSYS);
 out:
 	return buf;
@@ -531,7 +535,7 @@ __init int hypfs_diag_init(void)
 	return rc;
 }
 
-__exit void hypfs_diag_exit(void)
+void hypfs_diag_exit(void)
 {
 	diag224_delete_name_table();
 	diag204_free_buffer();
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
index 793dea6b9bb6..256b384aebe1 100644
--- a/arch/s390/hypfs/hypfs_diag.h
+++ b/arch/s390/hypfs/hypfs_diag.h
@@ -1,5 +1,5 @@
 /*
- * fs/hypfs/hypfs_diag.h
+ * arch/s390/hypfs_diag.h
  * Hypervisor filesystem for Linux on s390.
  *
  * Copyright (C) IBM Corp. 2006
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 18c091925ea5..bdade5f2e325 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -1,5 +1,5 @@
 /*
- * fs/hypfs/inode.c
+ * arch/s390/hypfs/inode.c
  * Hypervisor filesystem for Linux on s390.
  *
  * Copyright (C) IBM Corp. 2006
@@ -312,10 +312,12 @@ static void hypfs_kill_super(struct super_block *sb)
 {
 	struct hypfs_sb_info *sb_info = sb->s_fs_info;
 
-	hypfs_delete_tree(sb->s_root);
-	hypfs_remove(sb_info->update_file);
-	kfree(sb->s_fs_info);
-	sb->s_fs_info = NULL;
+	if (sb->s_root) {
+		hypfs_delete_tree(sb->s_root);
+		hypfs_remove(sb_info->update_file);
+		kfree(sb->s_fs_info);
+		sb->s_fs_info = NULL;
+	}
 	kill_litter_super(sb);
 }
 
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 9a33ed6ca696..aa978978d3d1 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional
 
 obj-y	:=  bitmap.o traps.o time.o process.o \
             setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-            semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o
+            semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o
 
 obj-y	+=  $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+=  $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -24,6 +24,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
 
 obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5b5799ac8f83..0c712b78a7e8 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -505,6 +505,8 @@ pgm_no_vtime2:
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
+	bz	BASED(kernel_per)
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3			# clear per-event-bit and ilc
@@ -536,6 +538,16 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	b	BASED(sysc_do_svc)
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
+	la	%r14,BASED(sysc_leave)	# load adr. of system return
+	br	%r1			# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 56f5f613b868..29bbfbab7332 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -518,6 +518,8 @@ pgm_no_vtime2:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	lg	%r1,__TI_task(%r9)
+	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
+	jz	kernel_per
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
 	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
@@ -553,6 +555,16 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	j	sysc_do_svc
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	lhi	%r0,__LC_PGM_OLD_PSW
+	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	larl	%r14,sysc_leave		# load adr. of system ret, no work
+	jg	do_single_step		# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
@@ -815,7 +827,7 @@ restart_go:
  */
 stack_overflow:
 	lg	%r15,__LC_PANIC_STACK	# change to panic stack
-	aghi	%r1,-SP_SIZE
+	aghi	%r15,-SP_SIZE
 	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
 	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
 	la	%r1,__LC_SAVE_AREA
@@ -823,7 +835,7 @@ stack_overflow:
 	je	0f
 	chi	%r12,__LC_PGM_OLD_PSW
 	je	0f
-	la	%r1,__LC_SAVE_AREA+16
+	la	%r1,__LC_SAVE_AREA+32
 0:	mvc	SP_R12(32,%r15),0(%r1)	# move %r12-%r15 to stack
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index adad8863ee2f..0f1db268a8a9 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -272,7 +272,7 @@ iplstart:
272# load parameter file from ipl device 272# load parameter file from ipl device
273# 273#
274.Lagain1: 274.Lagain1:
275 l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # ramdisk loc. is temp 275 l %r2,.Linitrd # ramdisk loc. is temp
276 bas %r14,.Lloader # load parameter file 276 bas %r14,.Lloader # load parameter file
277 ltr %r2,%r2 # got anything ? 277 ltr %r2,%r2 # got anything ?
278 bz .Lnopf 278 bz .Lnopf
@@ -280,7 +280,7 @@ iplstart:
280 bnh .Lnotrunc 280 bnh .Lnotrunc
281 la %r2,895 281 la %r2,895
282.Lnotrunc: 282.Lnotrunc:
283 l %r4,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) 283 l %r4,.Linitrd
284 clc 0(3,%r4),.L_hdr # if it is HDRx 284 clc 0(3,%r4),.L_hdr # if it is HDRx
285 bz .Lagain1 # skip dataset header 285 bz .Lagain1 # skip dataset header
286 clc 0(3,%r4),.L_eof # if it is EOFx 286 clc 0(3,%r4),.L_eof # if it is EOFx
@@ -323,14 +323,15 @@ iplstart:
323# load ramdisk from ipl device 323# load ramdisk from ipl device
324# 324#
325.Lagain2: 325.Lagain2:
326 l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # addr of ramdisk 326 l %r2,.Linitrd # addr of ramdisk
327 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
327 bas %r14,.Lloader # load ramdisk 328 bas %r14,.Lloader # load ramdisk
328 st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk 329 st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk
329 ltr %r2,%r2 330 ltr %r2,%r2
330 bnz .Lrdcont 331 bnz .Lrdcont
331 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found 332 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
332.Lrdcont: 333.Lrdcont:
333 l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) 334 l %r2,.Linitrd
334 335
335 clc 0(3,%r2),.L_hdr # skip HDRx and EOFx 336 clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
336 bz .Lagain2 337 bz .Lagain2
@@ -379,6 +380,7 @@ iplstart:
379 l %r1,.Lstartup 380 l %r1,.Lstartup
380 br %r1 381 br %r1
381 382
383.Linitrd:.long _end + 0x400000 # default address of initrd
382.Lparm: .long PARMAREA 384.Lparm: .long PARMAREA
383.Lstartup: .long startup 385.Lstartup: .long startup
384.Lcvtab:.long _ebcasc # ebcdic to ascii table 386.Lcvtab:.long _ebcasc # ebcdic to ascii table
@@ -479,65 +481,6 @@ start:
479 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 481 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
480 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff 482 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
481 483
482.macro GET_IPL_DEVICE
483.Lget_ipl_device:
484 l %r1,0xb8 # get sid
485 sll %r1,15 # test if subchannel is enabled
486 srl %r1,31
487 ltr %r1,%r1
488 bz 2f-.LPG1(%r13) # subchannel disabled
489 l %r1,0xb8
490 la %r5,.Lipl_schib-.LPG1(%r13)
491 stsch 0(%r5) # get schib of subchannel
492 bnz 2f-.LPG1(%r13) # schib not available
493 tm 5(%r5),0x01 # devno valid?
494 bno 2f-.LPG1(%r13)
495 la %r6,ipl_parameter_flags-.LPG1(%r13)
496 oi 3(%r6),0x01 # set flag
497 la %r2,ipl_devno-.LPG1(%r13)
498 mvc 0(2,%r2),6(%r5) # store devno
499 tm 4(%r5),0x80 # qdio capable device?
500 bno 2f-.LPG1(%r13)
501 oi 3(%r6),0x02 # set flag
502
503 # copy ipl parameters
504
505 lhi %r0,4096
506 l %r2,20(%r0) # get address of parameter list
507 lhi %r3,IPL_PARMBLOCK_ORIGIN
508 st %r3,20(%r0)
509 lhi %r4,1
510 cr %r2,%r3 # start parameters < destination ?
511 jl 0f
512 lhi %r1,1 # copy direction is upwards
513 j 1f
5140: lhi %r1,-1 # copy direction is downwards
515 ar %r2,%r0
516 ar %r3,%r0
517 ar %r2,%r1
518 ar %r3,%r1
5191: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters
520 ar %r3,%r1
521 ar %r2,%r1
522 sr %r0,%r4
523 jne 1b
524 b 2f-.LPG1(%r13)
525
526 .align 4
527.Lipl_schib:
528 .rept 13
529 .long 0
530 .endr
531
532 .globl ipl_parameter_flags
533ipl_parameter_flags:
534 .long 0
535 .globl ipl_devno
536ipl_devno:
537 .word 0
5382:
539.endm
540
541#ifdef CONFIG_64BIT 484#ifdef CONFIG_64BIT
542#include "head64.S" 485#include "head64.S"
543#else 486#else
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index a4dc61f3285e..1fa9fa1ca740 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -26,8 +26,8 @@ startup:basr %r13,0 # get base
26# 26#
27 .org PARMAREA 27 .org PARMAREA
28 .long 0,0 # IPL_DEVICE 28 .long 0,0 # IPL_DEVICE
29 .long 0,RAMDISK_ORIGIN # INITRD_START 29 .long 0,0 # INITRD_START
30 .long 0,RAMDISK_SIZE # INITRD_SIZE 30 .long 0,0 # INITRD_SIZE
31 31
32 .org COMMAND_LINE 32 .org COMMAND_LINE
33 .byte "root=/dev/ram0 ro" 33 .byte "root=/dev/ram0 ro"
@@ -37,12 +37,23 @@ startup:basr %r13,0 # get base
37 37
38startup_continue: 38startup_continue:
39 basr %r13,0 # get base 39 basr %r13,0 # get base
40.LPG1: GET_IPL_DEVICE 40.LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
41 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 41 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
42 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 42 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
43 # move IPL device to lowcore 43 # move IPL device to lowcore
44 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12) 44 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
45#
46# Setup stack
47#
48 l %r15,.Linittu-.LPG1(%r13)
49 mvc __LC_CURRENT(4),__TI_task(%r15)
50 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
51 st %r15,__LC_KERNEL_STACK # set end of kernel stack
52 ahi %r15,-96
53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
45 54
55 l %r14,.Lipl_save_parameters-.LPG1(%r13)
56 basr %r14,%r14
46# 57#
47# clear bss memory 58# clear bss memory
48# 59#
@@ -114,6 +125,10 @@ startup_continue:
114 b .Lfchunk-.LPG1(%r13) 125 b .Lfchunk-.LPG1(%r13)
115 126
116 .align 4 127 .align 4
128.Lipl_save_parameters:
129 .long ipl_save_parameters
130.Linittu:
131 .long init_thread_union
117.Lpmask: 132.Lpmask:
118 .byte 0 133 .byte 0
119.align 8 134.align 8
@@ -273,7 +288,23 @@ startup_continue:
273.Lbss_end: .long _end 288.Lbss_end: .long _end
274.Lparmaddr: .long PARMAREA 289.Lparmaddr: .long PARMAREA
275.Lsccbaddr: .long .Lsccb 290.Lsccbaddr: .long .Lsccb
291
292 .globl ipl_schib
293ipl_schib:
294 .rept 13
295 .long 0
296 .endr
297
298 .globl ipl_flags
299ipl_flags:
300 .long 0
301 .globl ipl_devno
302ipl_devno:
303 .word 0
304
276 .org 0x12000 305 .org 0x12000
306.globl s390_readinfo_sccb
307s390_readinfo_sccb:
277.Lsccb: 308.Lsccb:
278 .hword 0x1000 # length, one page 309 .hword 0x1000 # length, one page
279 .byte 0x00,0x00,0x00 310 .byte 0x00,0x00,0x00
@@ -302,16 +333,6 @@ startup_continue:
302 .globl _stext 333 .globl _stext
303_stext: basr %r13,0 # get base 334_stext: basr %r13,0 # get base
304.LPG3: 335.LPG3:
305#
306# Setup stack
307#
308 l %r15,.Linittu-.LPG3(%r13)
309 mvc __LC_CURRENT(4),__TI_task(%r15)
310 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
311 st %r15,__LC_KERNEL_STACK # set end of kernel stack
312 ahi %r15,-96
313 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
314
315# check control registers 336# check control registers
316 stctl %c0,%c15,0(%r15) 337 stctl %c0,%c15,0(%r15)
317 oi 2(%r15),0x40 # enable sigp emergency signal 338 oi 2(%r15),0x40 # enable sigp emergency signal
@@ -330,6 +351,5 @@ _stext: basr %r13,0 # get base
330# 351#
331 .align 8 352 .align 8
332.Ldw: .long 0x000a0000,0x00000000 353.Ldw: .long 0x000a0000,0x00000000
333.Linittu:.long init_thread_union
334.Lstart:.long start_kernel 354.Lstart:.long start_kernel
335.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 355.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 9d80c5b1ef95..a8bdd96494c7 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,8 +26,8 @@ startup:basr %r13,0 # get base
26# 26#
27 .org PARMAREA 27 .org PARMAREA
28 .quad 0 # IPL_DEVICE 28 .quad 0 # IPL_DEVICE
29 .quad RAMDISK_ORIGIN # INITRD_START 29 .quad 0 # INITRD_START
30 .quad RAMDISK_SIZE # INITRD_SIZE 30 .quad 0 # INITRD_SIZE
31 31
32 .org COMMAND_LINE 32 .org COMMAND_LINE
33 .byte "root=/dev/ram0 ro" 33 .byte "root=/dev/ram0 ro"
@@ -39,8 +39,8 @@ startup_continue:
39 basr %r13,0 # get base 39 basr %r13,0 # get base
40.LPG1: sll %r13,1 # remove high order bit 40.LPG1: sll %r13,1 # remove high order bit
41 srl %r13,1 41 srl %r13,1
42 GET_IPL_DEVICE
43 lhi %r1,1 # mode 1 = esame 42 lhi %r1,1 # mode 1 = esame
43 mvi __LC_AR_MODE_ID,1 # set esame flag
44 slr %r0,%r0 # set cpuid to zero 44 slr %r0,%r0 # set cpuid to zero
45 sigp %r1,%r0,0x12 # switch to esame mode 45 sigp %r1,%r0,0x12 # switch to esame mode
46 sam64 # switch to 64 bit mode 46 sam64 # switch to 64 bit mode
@@ -48,7 +48,18 @@ startup_continue:
48 lg %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area 48 lg %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area
49 # move IPL device to lowcore 49 # move IPL device to lowcore
50 mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12) 50 mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
51#
52# Setup stack
53#
54 larl %r15,init_thread_union
55 lg %r14,__TI_task(%r15) # cache current in lowcore
56 stg %r14,__LC_CURRENT
57 aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
59 aghi %r15,-160
60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
51 61
62 brasl %r14,ipl_save_parameters
52# 63#
53# clear bss memory 64# clear bss memory
54# 65#
@@ -239,6 +250,19 @@ startup_continue:
239 oi 7(%r12),0x80 # set IDTE flag 250 oi 7(%r12),0x80 # set IDTE flag
2400: 2510:
241 252
253#
254# find out if we have the MVCOS instruction
255#
256 la %r1,0f-.LPG1(%r13) # set program check address
257 stg %r1,__LC_PGM_NEW_PSW+8
258 .short 0xc800 # mvcos 0(%r0),0(%r0),%r0
259 .short 0x0000
260 .short 0x0000
2610: tm 0x8f,0x13 # special-operation exception?
262 bno 1f-.LPG1(%r13) # if yes, MVCOS is present
263 oi 6(%r12),2 # set MVCOS flag
2641:
265
242 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, 266 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
243 # virtual and never return ... 267 # virtual and never return ...
244 .align 16 268 .align 16
@@ -268,7 +292,22 @@ startup_continue:
268.Lparmaddr: 292.Lparmaddr:
269 .quad PARMAREA 293 .quad PARMAREA
270 294
295 .globl ipl_schib
296ipl_schib:
297 .rept 13
298 .long 0
299 .endr
300
301 .globl ipl_flags
302ipl_flags:
303 .long 0
304 .globl ipl_devno
305ipl_devno:
306 .word 0
307
271 .org 0x12000 308 .org 0x12000
309.globl s390_readinfo_sccb
310s390_readinfo_sccb:
272.Lsccb: 311.Lsccb:
273 .hword 0x1000 # length, one page 312 .hword 0x1000 # length, one page
274 .byte 0x00,0x00,0x00 313 .byte 0x00,0x00,0x00
@@ -297,24 +336,12 @@ startup_continue:
297 .globl _stext 336 .globl _stext
298_stext: basr %r13,0 # get base 337_stext: basr %r13,0 # get base
299.LPG3: 338.LPG3:
300#
301# Setup stack
302#
303 larl %r15,init_thread_union
304 lg %r14,__TI_task(%r15) # cache current in lowcore
305 stg %r14,__LC_CURRENT
306 aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
307 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
308 aghi %r15,-160
309 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
310
311# check control registers 339# check control registers
312 stctg %c0,%c15,0(%r15) 340 stctg %c0,%c15,0(%r15)
313 oi 6(%r15),0x40 # enable sigp emergency signal 341 oi 6(%r15),0x40 # enable sigp emergency signal
314 oi 4(%r15),0x10 # switch on low address proctection 342 oi 4(%r15),0x10 # switch on low address proctection
315 lctlg %c0,%c15,0(%r15) 343 lctlg %c0,%c15,0(%r15)
316 344
317#
318 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess 345 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
319 brasl %r14,start_kernel # go to C code 346 brasl %r14,start_kernel # go to C code
320# 347#
@@ -322,7 +349,7 @@ _stext: basr %r13,0 # get base
322# 349#
323 basr %r13,0 350 basr %r13,0
324 lpswe .Ldw-.(%r13) # load disabled wait psw 351 lpswe .Ldw-.(%r13) # load disabled wait psw
325# 352
326 .align 8 353 .align 8
327.Ldw: .quad 0x0002000180000000,0x0000000000000000 354.Ldw: .quad 0x0002000180000000,0x0000000000000000
328.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 355.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
new file mode 100644
index 000000000000..6555cc48e28f
--- /dev/null
+++ b/arch/s390/kernel/ipl.c
@@ -0,0 +1,942 @@
1/*
2 * arch/s390/kernel/ipl.c
3 * ipl/reipl/dump support for Linux on s390.
4 *
5 * Copyright (C) IBM Corp. 2005,2006
6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
7 * Heiko Carstens <heiko.carstens@de.ibm.com>
8 * Volker Sameske <sameske@de.ibm.com>
9 */
10
11#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/delay.h>
15#include <linux/reboot.h>
16#include <asm/smp.h>
17#include <asm/setup.h>
18#include <asm/cpcmd.h>
19#include <asm/cio.h>
20
21#define IPL_PARM_BLOCK_VERSION 0
22
23enum ipl_type {
24 IPL_TYPE_NONE = 1,
25 IPL_TYPE_UNKNOWN = 2,
26 IPL_TYPE_CCW = 4,
27 IPL_TYPE_FCP = 8,
28};
29
30#define IPL_NONE_STR "none"
31#define IPL_UNKNOWN_STR "unknown"
32#define IPL_CCW_STR "ccw"
33#define IPL_FCP_STR "fcp"
34
35static char *ipl_type_str(enum ipl_type type)
36{
37 switch (type) {
38 case IPL_TYPE_NONE:
39 return IPL_NONE_STR;
40 case IPL_TYPE_CCW:
41 return IPL_CCW_STR;
42 case IPL_TYPE_FCP:
43 return IPL_FCP_STR;
44 case IPL_TYPE_UNKNOWN:
45 default:
46 return IPL_UNKNOWN_STR;
47 }
48}
49
50enum ipl_method {
51 IPL_METHOD_NONE,
52 IPL_METHOD_CCW_CIO,
53 IPL_METHOD_CCW_DIAG,
54 IPL_METHOD_CCW_VM,
55 IPL_METHOD_FCP_RO_DIAG,
56 IPL_METHOD_FCP_RW_DIAG,
57 IPL_METHOD_FCP_RO_VM,
58};
59
60enum shutdown_action {
61 SHUTDOWN_REIPL,
62 SHUTDOWN_DUMP,
63 SHUTDOWN_STOP,
64};
65
66#define SHUTDOWN_REIPL_STR "reipl"
67#define SHUTDOWN_DUMP_STR "dump"
68#define SHUTDOWN_STOP_STR "stop"
69
70static char *shutdown_action_str(enum shutdown_action action)
71{
72 switch (action) {
73 case SHUTDOWN_REIPL:
74 return SHUTDOWN_REIPL_STR;
75 case SHUTDOWN_DUMP:
76 return SHUTDOWN_DUMP_STR;
77 case SHUTDOWN_STOP:
78 return SHUTDOWN_STOP_STR;
79 default:
80 BUG();
81 }
82}
83
84enum diag308_subcode {
85 DIAG308_IPL = 3,
86 DIAG308_DUMP = 4,
87 DIAG308_SET = 5,
88 DIAG308_STORE = 6,
89};
90
91enum diag308_ipl_type {
92 DIAG308_IPL_TYPE_FCP = 0,
93 DIAG308_IPL_TYPE_CCW = 2,
94};
95
96enum diag308_opt {
97 DIAG308_IPL_OPT_IPL = 0x10,
98 DIAG308_IPL_OPT_DUMP = 0x20,
99};
100
101enum diag308_rc {
102 DIAG308_RC_OK = 1,
103};
104
105static int diag308_set_works = 0;
106
107static int reipl_capabilities = IPL_TYPE_UNKNOWN;
108static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
109static enum ipl_method reipl_method = IPL_METHOD_NONE;
110static struct ipl_parameter_block *reipl_block_fcp;
111static struct ipl_parameter_block *reipl_block_ccw;
112
113static int dump_capabilities = IPL_TYPE_NONE;
114static enum ipl_type dump_type = IPL_TYPE_NONE;
115static enum ipl_method dump_method = IPL_METHOD_NONE;
116static struct ipl_parameter_block *dump_block_fcp;
117static struct ipl_parameter_block *dump_block_ccw;
118
119static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
120
121static int diag308(unsigned long subcode, void *addr)
122{
123 register unsigned long _addr asm("0") = (unsigned long)addr;
124 register unsigned long _rc asm("1") = 0;
125
126 asm volatile (
127 " diag %0,%2,0x308\n"
128 "0: \n"
129 ".section __ex_table,\"a\"\n"
130#ifdef CONFIG_64BIT
131 " .align 8\n"
132 " .quad 0b, 0b\n"
133#else
134 " .align 4\n"
135 " .long 0b, 0b\n"
136#endif
137 ".previous\n"
138 : "+d" (_addr), "+d" (_rc)
139 : "d" (subcode) : "cc", "memory" );
140
141 return _rc;
142}
143
144/* SYSFS */
145
146#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
147static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
148 char *page) \
149{ \
150 return sprintf(page, _format, _value); \
151} \
152static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
153 __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
154
155#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
156static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
157 char *page) \
158{ \
159 return sprintf(page, _fmt_out, \
160 (unsigned long long) _value); \
161} \
162static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\
163 const char *buf, size_t len) \
164{ \
165 unsigned long long value; \
166 if (sscanf(buf, _fmt_in, &value) != 1) \
167 return -EINVAL; \
168 _value = value; \
169 return len; \
170} \
171static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
172 __ATTR(_name,(S_IRUGO | S_IWUSR), \
173 sys_##_prefix##_##_name##_show, \
174 sys_##_prefix##_##_name##_store);
175
176static void make_attrs_ro(struct attribute **attrs)
177{
178 while (*attrs) {
179 (*attrs)->mode = S_IRUGO;
180 attrs++;
181 }
182}
183
184/*
185 * ipl section
186 */
187
188static enum ipl_type ipl_get_type(void)
189{
190 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
191
192 if (!(ipl_flags & IPL_DEVNO_VALID))
193 return IPL_TYPE_UNKNOWN;
194 if (!(ipl_flags & IPL_PARMBLOCK_VALID))
195 return IPL_TYPE_CCW;
196 if (ipl->hdr.version > IPL_MAX_SUPPORTED_VERSION)
197 return IPL_TYPE_UNKNOWN;
198 if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
199 return IPL_TYPE_UNKNOWN;
200 return IPL_TYPE_FCP;
201}
202
203static ssize_t ipl_type_show(struct subsystem *subsys, char *page)
204{
205 return sprintf(page, "%s\n", ipl_type_str(ipl_get_type()));
206}
207
208static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
209
210static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page)
211{
212 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
213
214 switch (ipl_get_type()) {
215 case IPL_TYPE_CCW:
216 return sprintf(page, "0.0.%04x\n", ipl_devno);
217 case IPL_TYPE_FCP:
218 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
219 default:
220 return 0;
221 }
222}
223
224static struct subsys_attribute sys_ipl_device_attr =
225 __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
226
227static ssize_t ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off,
228 size_t count)
229{
230 unsigned int size = IPL_PARMBLOCK_SIZE;
231
232 if (off > size)
233 return 0;
234 if (off + count > size)
235 count = size - off;
236 memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count);
237 return count;
238}
239
240static struct bin_attribute ipl_parameter_attr = {
241 .attr = {
242 .name = "binary_parameter",
243 .mode = S_IRUGO,
244 .owner = THIS_MODULE,
245 },
246 .size = PAGE_SIZE,
247 .read = &ipl_parameter_read,
248};
249
250static ssize_t ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off,
251 size_t count)
252{
253 unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
254 void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
255
256 if (off > size)
257 return 0;
258 if (off + count > size)
259 count = size - off;
260 memcpy(buf, scp_data + off, count);
261 return count;
262}
263
264static struct bin_attribute ipl_scp_data_attr = {
265 .attr = {
266 .name = "scp_data",
267 .mode = S_IRUGO,
268 .owner = THIS_MODULE,
269 },
270 .size = PAGE_SIZE,
271 .read = &ipl_scp_data_read,
272};
273
274/* FCP ipl device attributes */
275
276DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", (unsigned long long)
277 IPL_PARMBLOCK_START->ipl_info.fcp.wwpn);
278DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n", (unsigned long long)
279 IPL_PARMBLOCK_START->ipl_info.fcp.lun);
280DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
281 IPL_PARMBLOCK_START->ipl_info.fcp.bootprog);
282DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
283 IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
284
285static struct attribute *ipl_fcp_attrs[] = {
286 &sys_ipl_type_attr.attr,
287 &sys_ipl_device_attr.attr,
288 &sys_ipl_fcp_wwpn_attr.attr,
289 &sys_ipl_fcp_lun_attr.attr,
290 &sys_ipl_fcp_bootprog_attr.attr,
291 &sys_ipl_fcp_br_lba_attr.attr,
292 NULL,
293};
294
295static struct attribute_group ipl_fcp_attr_group = {
296 .attrs = ipl_fcp_attrs,
297};
298
299/* CCW ipl device attributes */
300
301static struct attribute *ipl_ccw_attrs[] = {
302 &sys_ipl_type_attr.attr,
303 &sys_ipl_device_attr.attr,
304 NULL,
305};
306
307static struct attribute_group ipl_ccw_attr_group = {
308 .attrs = ipl_ccw_attrs,
309};
310
311/* UNKNOWN ipl device attributes */
312
313static struct attribute *ipl_unknown_attrs[] = {
314 &sys_ipl_type_attr.attr,
315 NULL,
316};
317
318static struct attribute_group ipl_unknown_attr_group = {
319 .attrs = ipl_unknown_attrs,
320};
321
322static decl_subsys(ipl, NULL, NULL);
323
324/*
325 * reipl section
326 */
327
328/* FCP reipl device attributes */
329
330DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
331 reipl_block_fcp->ipl_info.fcp.wwpn);
332DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
333 reipl_block_fcp->ipl_info.fcp.lun);
334DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
335 reipl_block_fcp->ipl_info.fcp.bootprog);
336DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
337 reipl_block_fcp->ipl_info.fcp.br_lba);
338DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
339 reipl_block_fcp->ipl_info.fcp.devno);
340
341static struct attribute *reipl_fcp_attrs[] = {
342 &sys_reipl_fcp_device_attr.attr,
343 &sys_reipl_fcp_wwpn_attr.attr,
344 &sys_reipl_fcp_lun_attr.attr,
345 &sys_reipl_fcp_bootprog_attr.attr,
346 &sys_reipl_fcp_br_lba_attr.attr,
347 NULL,
348};
349
350static struct attribute_group reipl_fcp_attr_group = {
351 .name = IPL_FCP_STR,
352 .attrs = reipl_fcp_attrs,
353};
354
355/* CCW reipl device attributes */
356
357DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
358 reipl_block_ccw->ipl_info.ccw.devno);
359
360static struct attribute *reipl_ccw_attrs[] = {
361 &sys_reipl_ccw_device_attr.attr,
362 NULL,
363};
364
365static struct attribute_group reipl_ccw_attr_group = {
366 .name = IPL_CCW_STR,
367 .attrs = reipl_ccw_attrs,
368};
369
370/* reipl type */
371
372static int reipl_set_type(enum ipl_type type)
373{
374 if (!(reipl_capabilities & type))
375 return -EINVAL;
376
377 switch(type) {
378 case IPL_TYPE_CCW:
379 if (MACHINE_IS_VM)
380 reipl_method = IPL_METHOD_CCW_VM;
381 else
382 reipl_method = IPL_METHOD_CCW_CIO;
383 break;
384 case IPL_TYPE_FCP:
385 if (diag308_set_works)
386 reipl_method = IPL_METHOD_FCP_RW_DIAG;
387 else if (MACHINE_IS_VM)
388 reipl_method = IPL_METHOD_FCP_RO_VM;
389 else
390 reipl_method = IPL_METHOD_FCP_RO_DIAG;
391 break;
392 default:
393 reipl_method = IPL_METHOD_NONE;
394 }
395 reipl_type = type;
396 return 0;
397}
398
399static ssize_t reipl_type_show(struct subsystem *subsys, char *page)
400{
401 return sprintf(page, "%s\n", ipl_type_str(reipl_type));
402}
403
404static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf,
405 size_t len)
406{
407 int rc = -EINVAL;
408
409 if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
410 rc = reipl_set_type(IPL_TYPE_CCW);
411 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
412 rc = reipl_set_type(IPL_TYPE_FCP);
413 return (rc != 0) ? rc : len;
414}
415
416static struct subsys_attribute reipl_type_attr =
417 __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
418
419static decl_subsys(reipl, NULL, NULL);
420
421/*
422 * dump section
423 */
424
425/* FCP dump device attributes */
426
427DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
428 dump_block_fcp->ipl_info.fcp.wwpn);
429DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
430 dump_block_fcp->ipl_info.fcp.lun);
431DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
432 dump_block_fcp->ipl_info.fcp.bootprog);
433DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
434 dump_block_fcp->ipl_info.fcp.br_lba);
435DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
436 dump_block_fcp->ipl_info.fcp.devno);
437
438static struct attribute *dump_fcp_attrs[] = {
439 &sys_dump_fcp_device_attr.attr,
440 &sys_dump_fcp_wwpn_attr.attr,
441 &sys_dump_fcp_lun_attr.attr,
442 &sys_dump_fcp_bootprog_attr.attr,
443 &sys_dump_fcp_br_lba_attr.attr,
444 NULL,
445};
446
447static struct attribute_group dump_fcp_attr_group = {
448 .name = IPL_FCP_STR,
449 .attrs = dump_fcp_attrs,
450};
451
452/* CCW dump device attributes */
453
454DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
455 dump_block_ccw->ipl_info.ccw.devno);
456
457static struct attribute *dump_ccw_attrs[] = {
458 &sys_dump_ccw_device_attr.attr,
459 NULL,
460};
461
462static struct attribute_group dump_ccw_attr_group = {
463 .name = IPL_CCW_STR,
464 .attrs = dump_ccw_attrs,
465};
466
467/* dump type */
468
469static int dump_set_type(enum ipl_type type)
470{
471 if (!(dump_capabilities & type))
472 return -EINVAL;
473 switch(type) {
474 case IPL_TYPE_CCW:
475 if (MACHINE_IS_VM)
476 dump_method = IPL_METHOD_CCW_VM;
477 else
478 dump_method = IPL_METHOD_CCW_CIO;
479 break;
480 case IPL_TYPE_FCP:
481 dump_method = IPL_METHOD_FCP_RW_DIAG;
482 break;
483 default:
484 dump_method = IPL_METHOD_NONE;
485 }
486 dump_type = type;
487 return 0;
488}
489
490static ssize_t dump_type_show(struct subsystem *subsys, char *page)
491{
492 return sprintf(page, "%s\n", ipl_type_str(dump_type));
493}
494
495static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
496 size_t len)
497{
498 int rc = -EINVAL;
499
500 if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0)
501 rc = dump_set_type(IPL_TYPE_NONE);
502 else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
503 rc = dump_set_type(IPL_TYPE_CCW);
504 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
505 rc = dump_set_type(IPL_TYPE_FCP);
506 return (rc != 0) ? rc : len;
507}
508
509static struct subsys_attribute dump_type_attr =
510 __ATTR(dump_type, 0644, dump_type_show, dump_type_store);
511
512static decl_subsys(dump, NULL, NULL);
513
514#ifdef CONFIG_SMP
515static void dump_smp_stop_all(void)
516{
517 int cpu;
518 preempt_disable();
519 for_each_online_cpu(cpu) {
520 if (cpu == smp_processor_id())
521 continue;
522 while (signal_processor(cpu, sigp_stop) == sigp_busy)
523 udelay(10);
524 }
525 preempt_enable();
526}
527#else
528#define dump_smp_stop_all() do { } while (0)
529#endif
530
531/*
532 * Shutdown actions section
533 */
534
535static decl_subsys(shutdown_actions, NULL, NULL);
536
537/* on panic */
538
539static ssize_t on_panic_show(struct subsystem *subsys, char *page)
540{
541 return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
542}
543
544static ssize_t on_panic_store(struct subsystem *subsys, const char *buf,
545 size_t len)
546{
547 if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
548 on_panic_action = SHUTDOWN_REIPL;
549 else if (strncmp(buf, SHUTDOWN_DUMP_STR,
550 strlen(SHUTDOWN_DUMP_STR)) == 0)
551 on_panic_action = SHUTDOWN_DUMP;
552 else if (strncmp(buf, SHUTDOWN_STOP_STR,
553 strlen(SHUTDOWN_STOP_STR)) == 0)
554 on_panic_action = SHUTDOWN_STOP;
555 else
556 return -EINVAL;
557
558 return len;
559}
560
561static struct subsys_attribute on_panic_attr =
562 __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
563
564static void print_fcp_block(struct ipl_parameter_block *fcp_block)
565{
566 printk(KERN_EMERG "wwpn: %016llx\n",
567 (unsigned long long)fcp_block->ipl_info.fcp.wwpn);
568 printk(KERN_EMERG "lun: %016llx\n",
569 (unsigned long long)fcp_block->ipl_info.fcp.lun);
570 printk(KERN_EMERG "bootprog: %lld\n",
571 (unsigned long long)fcp_block->ipl_info.fcp.bootprog);
572 printk(KERN_EMERG "br_lba: %lld\n",
573 (unsigned long long)fcp_block->ipl_info.fcp.br_lba);
574 printk(KERN_EMERG "device: %llx\n",
575 (unsigned long long)fcp_block->ipl_info.fcp.devno);
576 printk(KERN_EMERG "opt: %x\n", fcp_block->ipl_info.fcp.opt);
577}
578
579void do_reipl(void)
580{
581 struct ccw_dev_id devid;
582 static char buf[100];
583
584 switch (reipl_type) {
585 case IPL_TYPE_CCW:
586 printk(KERN_EMERG "reboot on ccw device: 0.0.%04x\n",
587 reipl_block_ccw->ipl_info.ccw.devno);
588 break;
589 case IPL_TYPE_FCP:
590 printk(KERN_EMERG "reboot on fcp device:\n");
591 print_fcp_block(reipl_block_fcp);
592 break;
593 default:
594 break;
595 }
596
597 switch (reipl_method) {
598 case IPL_METHOD_CCW_CIO:
599 devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
600 devid.ssid = 0;
601 reipl_ccw_dev(&devid);
602 break;
603 case IPL_METHOD_CCW_VM:
604 sprintf(buf, "IPL %X", reipl_block_ccw->ipl_info.ccw.devno);
605 cpcmd(buf, NULL, 0, NULL);
606 break;
607 case IPL_METHOD_CCW_DIAG:
608 diag308(DIAG308_SET, reipl_block_ccw);
609 diag308(DIAG308_IPL, NULL);
610 break;
611 case IPL_METHOD_FCP_RW_DIAG:
612 diag308(DIAG308_SET, reipl_block_fcp);
613 diag308(DIAG308_IPL, NULL);
614 break;
615 case IPL_METHOD_FCP_RO_DIAG:
616 diag308(DIAG308_IPL, NULL);
617 break;
618 case IPL_METHOD_FCP_RO_VM:
619 cpcmd("IPL", NULL, 0, NULL);
620 break;
621 case IPL_METHOD_NONE:
622 default:
623 if (MACHINE_IS_VM)
624 cpcmd("IPL", NULL, 0, NULL);
625 diag308(DIAG308_IPL, NULL);
626 break;
627 }
628 panic("reipl failed!\n");
629}
630
631static void do_dump(void)
632{
633 struct ccw_dev_id devid;
634 static char buf[100];
635
636 switch (dump_type) {
637 case IPL_TYPE_CCW:
638 printk(KERN_EMERG "Automatic dump on ccw device: 0.0.%04x\n",
639 dump_block_ccw->ipl_info.ccw.devno);
640 break;
641 case IPL_TYPE_FCP:
642 printk(KERN_EMERG "Automatic dump on fcp device:\n");
643 print_fcp_block(dump_block_fcp);
644 break;
645 default:
646 return;
647 }
648
649 switch (dump_method) {
650 case IPL_METHOD_CCW_CIO:
651 dump_smp_stop_all();
652 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
653 devid.ssid = 0;
654 reipl_ccw_dev(&devid);
655 break;
656 case IPL_METHOD_CCW_VM:
657 dump_smp_stop_all();
658 sprintf(buf, "STORE STATUS");
659 cpcmd(buf, NULL, 0, NULL);
660 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
661 cpcmd(buf, NULL, 0, NULL);
662 break;
663 case IPL_METHOD_CCW_DIAG:
664 diag308(DIAG308_SET, dump_block_ccw);
665 diag308(DIAG308_DUMP, NULL);
666 break;
667 case IPL_METHOD_FCP_RW_DIAG:
668 diag308(DIAG308_SET, dump_block_fcp);
669 diag308(DIAG308_DUMP, NULL);
670 break;
671 case IPL_METHOD_NONE:
672 default:
673 return;
674 }
675 printk(KERN_EMERG "Dump failed!\n");
676}
677
678/* init functions */
679
680static int __init ipl_register_fcp_files(void)
681{
682 int rc;
683
684 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
685 &ipl_fcp_attr_group);
686 if (rc)
687 goto out;
688 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
689 &ipl_parameter_attr);
690 if (rc)
691 goto out_ipl_parm;
692 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
693 &ipl_scp_data_attr);
694 if (!rc)
695 goto out;
696
697 sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
698
699out_ipl_parm:
700 sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
701out:
702 return rc;
703}
704
705static int __init ipl_init(void)
706{
707 int rc;
708
709 rc = firmware_register(&ipl_subsys);
710 if (rc)
711 return rc;
712 switch (ipl_get_type()) {
713 case IPL_TYPE_CCW:
714 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
715 &ipl_ccw_attr_group);
716 break;
717 case IPL_TYPE_FCP:
718 rc = ipl_register_fcp_files();
719 break;
720 default:
721 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
722 &ipl_unknown_attr_group);
723 break;
724 }
725 if (rc)
726 firmware_unregister(&ipl_subsys);
727 return rc;
728}
729
730static void __init reipl_probe(void)
731{
732 void *buffer;
733
734 buffer = (void *) get_zeroed_page(GFP_KERNEL);
735 if (!buffer)
736 return;
737 if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK)
738 diag308_set_works = 1;
739 free_page((unsigned long)buffer);
740}
741
742static int __init reipl_ccw_init(void)
743{
744 int rc;
745
746 reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
747 if (!reipl_block_ccw)
748 return -ENOMEM;
749 rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_ccw_attr_group);
750 if (rc) {
751 free_page((unsigned long)reipl_block_ccw);
752 return rc;
753 }
754 reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
755 reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
756 reipl_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw);
757 reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
758 if (ipl_get_type() == IPL_TYPE_CCW)
759 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
760 reipl_capabilities |= IPL_TYPE_CCW;
761 return 0;
762}
763
764static int __init reipl_fcp_init(void)
765{
766 int rc;
767
768 if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP))
769 return 0;
770 if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP))
771 make_attrs_ro(reipl_fcp_attrs);
772
773 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
774 if (!reipl_block_fcp)
775 return -ENOMEM;
776 rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_fcp_attr_group);
777 if (rc) {
778 free_page((unsigned long)reipl_block_fcp);
779 return rc;
780 }
781 if (ipl_get_type() == IPL_TYPE_FCP) {
782 memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
783 } else {
784 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
785 reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
786 reipl_block_fcp->hdr.blk0_len =
787 sizeof(reipl_block_fcp->ipl_info.fcp);
788 reipl_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
789 reipl_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_IPL;
790 }
791 reipl_capabilities |= IPL_TYPE_FCP;
792 return 0;
793}
794
795static int __init reipl_init(void)
796{
797 int rc;
798
799 rc = firmware_register(&reipl_subsys);
800 if (rc)
801 return rc;
802 rc = subsys_create_file(&reipl_subsys, &reipl_type_attr);
803 if (rc) {
804 firmware_unregister(&reipl_subsys);
805 return rc;
806 }
807 rc = reipl_ccw_init();
808 if (rc)
809 return rc;
810 rc = reipl_fcp_init();
811 if (rc)
812 return rc;
813 rc = reipl_set_type(ipl_get_type());
814 if (rc)
815 return rc;
816 return 0;
817}
818
819static int __init dump_ccw_init(void)
820{
821 int rc;
822
823 dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
824 if (!dump_block_ccw)
825 return -ENOMEM;
826 rc = sysfs_create_group(&dump_subsys.kset.kobj, &dump_ccw_attr_group);
827 if (rc) {
828 free_page((unsigned long)dump_block_ccw);
829 return rc;
830 }
831 dump_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
832 dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
833 dump_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw);
834 dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
835 dump_capabilities |= IPL_TYPE_CCW;
836 return 0;
837}
838
839extern char s390_readinfo_sccb[];
840
841static int __init dump_fcp_init(void)
842{
843 int rc;
844
845	if (!(s390_readinfo_sccb[91] & 0x2))
846 return 0; /* LDIPL DUMP is not installed */
847 if (!diag308_set_works)
848 return 0;
849 dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
850 if (!dump_block_fcp)
851 return -ENOMEM;
852 rc = sysfs_create_group(&dump_subsys.kset.kobj, &dump_fcp_attr_group);
853 if (rc) {
854 free_page((unsigned long)dump_block_fcp);
855 return rc;
856 }
857 dump_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
858 dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
859 dump_block_fcp->hdr.blk0_len = sizeof(dump_block_fcp->ipl_info.fcp);
860 dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
861 dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
862 dump_capabilities |= IPL_TYPE_FCP;
863 return 0;
864}
865
866#define SHUTDOWN_ON_PANIC_PRIO 0
867
868static int shutdown_on_panic_notify(struct notifier_block *self,
869 unsigned long event, void *data)
870{
871 if (on_panic_action == SHUTDOWN_DUMP)
872 do_dump();
873 else if (on_panic_action == SHUTDOWN_REIPL)
874 do_reipl();
875 return NOTIFY_OK;
876}
877
878static struct notifier_block shutdown_on_panic_nb = {
879 .notifier_call = shutdown_on_panic_notify,
880 .priority = SHUTDOWN_ON_PANIC_PRIO
881};
882
883static int __init dump_init(void)
884{
885 int rc;
886
887 rc = firmware_register(&dump_subsys);
888 if (rc)
889 return rc;
890 rc = subsys_create_file(&dump_subsys, &dump_type_attr);
891 if (rc) {
892 firmware_unregister(&dump_subsys);
893 return rc;
894 }
895 rc = dump_ccw_init();
896 if (rc)
897 return rc;
898 rc = dump_fcp_init();
899 if (rc)
900 return rc;
901 dump_set_type(IPL_TYPE_NONE);
902 return 0;
903}
904
905static int __init shutdown_actions_init(void)
906{
907 int rc;
908
909 rc = firmware_register(&shutdown_actions_subsys);
910 if (rc)
911 return rc;
912 rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr);
913 if (rc) {
914 firmware_unregister(&shutdown_actions_subsys);
915 return rc;
916 }
917 atomic_notifier_chain_register(&panic_notifier_list,
918 &shutdown_on_panic_nb);
919 return 0;
920}
921
922static int __init s390_ipl_init(void)
923{
924 int rc;
925
926 reipl_probe();
927 rc = ipl_init();
928 if (rc)
929 return rc;
930 rc = reipl_init();
931 if (rc)
932 return rc;
933 rc = dump_init();
934 if (rc)
935 return rc;
936 rc = shutdown_actions_init();
937 if (rc)
938 return rc;
939 return 0;
940}
941
942__initcall(s390_ipl_init);
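The shutdown-on-panic support above plugs into the generic panic notifier chain instead of adding an arch-specific hook. As a hedged, self-contained sketch of the same registration pattern from a module's point of view (the names and the message are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

/* Runs in atomic context when the kernel panics; keep it non-blocking. */
static int example_panic_notify(struct notifier_block *self,
				unsigned long event, void *data)
{
	printk(KERN_EMERG "example: panic notifier fired\n");
	return NOTIFY_OK;
}

static struct notifier_block example_panic_nb = {
	.notifier_call	= example_panic_notify,
	.priority	= 0,	/* same default priority as SHUTDOWN_ON_PANIC_PRIO */
};

static int __init example_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &example_panic_nb);
	return 0;
}

static void __exit example_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &example_panic_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");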
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
new file mode 100644
index 000000000000..ca28fb0b3790
--- /dev/null
+++ b/arch/s390/kernel/kprobes.c
@@ -0,0 +1,657 @@
1/*
2 * Kernel Probes (KProbes)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2002, 2006
19 *
20 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
21 */
22
23#include <linux/config.h>
24#include <linux/kprobes.h>
25#include <linux/ptrace.h>
26#include <linux/preempt.h>
27#include <linux/stop_machine.h>
28#include <asm/cacheflush.h>
29#include <asm/kdebug.h>
30#include <asm/sections.h>
31#include <asm/uaccess.h>
32#include <linux/module.h>
33
34DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
35DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
36
37int __kprobes arch_prepare_kprobe(struct kprobe *p)
38{
39 /* Make sure the probe isn't going on a difficult instruction */
40 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
41 return -EINVAL;
42
43 if ((unsigned long)p->addr & 0x01) {
44 printk("Attempt to register kprobe at an unaligned address\n");
45 return -EINVAL;
46 }
47
48 /* Use the get_insn_slot() facility for correctness */
49 if (!(p->ainsn.insn = get_insn_slot()))
50 return -ENOMEM;
51
52 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
53
54 get_instruction_type(&p->ainsn);
55 p->opcode = *p->addr;
56 return 0;
57}
58
59int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
60{
61 switch (*(__u8 *) instruction) {
62 case 0x0c: /* bassm */
63 case 0x0b: /* bsm */
64 case 0x83: /* diag */
65 case 0x44: /* ex */
66 return -EINVAL;
67 }
68 switch (*(__u16 *) instruction) {
69 case 0x0101: /* pr */
70 case 0xb25a: /* bsa */
71 case 0xb240: /* bakr */
72 case 0xb258: /* bsg */
73 case 0xb218: /* pc */
74 case 0xb228: /* pt */
75 return -EINVAL;
76 }
77 return 0;
78}
79
80void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
81{
82 /* default fixup method */
83 ainsn->fixup = FIXUP_PSW_NORMAL;
84
85 /* save r1 operand */
86 ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
87
88 /* save the instruction length (pop 5-5) in bytes */
89 switch (*(__u8 *) (ainsn->insn) >> 4) {
90 case 0:
91 ainsn->ilen = 2;
92 break;
93 case 1:
94 case 2:
95 ainsn->ilen = 4;
96 break;
97 case 3:
98 ainsn->ilen = 6;
99 break;
100 }
101
102 switch (*(__u8 *) ainsn->insn) {
103 case 0x05: /* balr */
104 case 0x0d: /* basr */
105 ainsn->fixup = FIXUP_RETURN_REGISTER;
106 /* if r2 = 0, no branch will be taken */
107 if ((*ainsn->insn & 0x0f) == 0)
108 ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
109 break;
110 case 0x06: /* bctr */
111 case 0x07: /* bcr */
112 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
113 break;
114 case 0x45: /* bal */
115 case 0x4d: /* bas */
116 ainsn->fixup = FIXUP_RETURN_REGISTER;
117 break;
118 case 0x47: /* bc */
119 case 0x46: /* bct */
120 case 0x86: /* bxh */
121 case 0x87: /* bxle */
122 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
123 break;
124 case 0x82: /* lpsw */
125 ainsn->fixup = FIXUP_NOT_REQUIRED;
126 break;
127 case 0xb2: /* lpswe */
128 if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) {
129 ainsn->fixup = FIXUP_NOT_REQUIRED;
130 }
131 break;
132 case 0xa7: /* bras */
133 if ((*ainsn->insn & 0x0f) == 0x05) {
134 ainsn->fixup |= FIXUP_RETURN_REGISTER;
135 }
136 break;
137 case 0xc0:
138 if ((*ainsn->insn & 0x0f) == 0x00 /* larl */
139 || (*ainsn->insn & 0x0f) == 0x05) /* brasl */
140 ainsn->fixup |= FIXUP_RETURN_REGISTER;
141 break;
142 case 0xeb:
143 if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */
144 *(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */
145 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
146 }
147 break;
148 case 0xe3: /* bctg */
149 if (*(((__u8 *) ainsn->insn) + 5) == 0x46) {
150 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
151 }
152 break;
153 }
154}
155
156static int __kprobes swap_instruction(void *aref)
157{
158 struct ins_replace_args *args = aref;
159 int err = -EFAULT;
160
161 asm volatile(
162 "0: mvc 0(2,%2),0(%3)\n"
163 "1: la %0,0\n"
164 "2:\n"
165 EX_TABLE(0b,2b)
166 : "+d" (err), "=m" (*args->ptr)
167 : "a" (args->ptr), "a" (&args->new), "m" (args->new));
168 return err;
169}
170
171void __kprobes arch_arm_kprobe(struct kprobe *p)
172{
173 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
174 unsigned long status = kcb->kprobe_status;
175 struct ins_replace_args args;
176
177 args.ptr = p->addr;
178 args.old = p->opcode;
179 args.new = BREAKPOINT_INSTRUCTION;
180
181 kcb->kprobe_status = KPROBE_SWAP_INST;
182 stop_machine_run(swap_instruction, &args, NR_CPUS);
183 kcb->kprobe_status = status;
184}
185
186void __kprobes arch_disarm_kprobe(struct kprobe *p)
187{
188 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
189 unsigned long status = kcb->kprobe_status;
190 struct ins_replace_args args;
191
192 args.ptr = p->addr;
193 args.old = BREAKPOINT_INSTRUCTION;
194 args.new = p->opcode;
195
196 kcb->kprobe_status = KPROBE_SWAP_INST;
197 stop_machine_run(swap_instruction, &args, NR_CPUS);
198 kcb->kprobe_status = status;
199}
200
201void __kprobes arch_remove_kprobe(struct kprobe *p)
202{
203 mutex_lock(&kprobe_mutex);
204 free_insn_slot(p->ainsn.insn);
205 mutex_unlock(&kprobe_mutex);
206}
207
208static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
209{
210 per_cr_bits kprobe_per_regs[1];
211
212 memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
213 regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
214
215 /* Set up the per control reg info, will pass to lctl */
216 kprobe_per_regs[0].em_instruction_fetch = 1;
217 kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
218 kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
219
220 /* Set the PER control regs, turns on single step for this address */
221 __ctl_load(kprobe_per_regs, 9, 11);
222 regs->psw.mask |= PSW_MASK_PER;
223 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
224}
225
226static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
227{
228 kcb->prev_kprobe.kp = kprobe_running();
229 kcb->prev_kprobe.status = kcb->kprobe_status;
230 kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
231 memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
232 sizeof(kcb->kprobe_saved_ctl));
233}
234
235static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
236{
237 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
238 kcb->kprobe_status = kcb->prev_kprobe.status;
239 kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
240 memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
241 sizeof(kcb->kprobe_saved_ctl));
242}
243
244static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
245 struct kprobe_ctlblk *kcb)
246{
247 __get_cpu_var(current_kprobe) = p;
248 /* Save the interrupt and per flags */
249 kcb->kprobe_saved_imask = regs->psw.mask &
250 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
251 /* Save the control regs that govern PER */
252 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
253}
254
255/* Called with kretprobe_lock held */
256void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
257 struct pt_regs *regs)
258{
259 struct kretprobe_instance *ri;
260
261 if ((ri = get_free_rp_inst(rp)) != NULL) {
262 ri->rp = rp;
263 ri->task = current;
264 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
265
266 /* Replace the return addr with trampoline addr */
267 regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
268
269 add_rp_inst(ri);
270 } else {
271 rp->nmissed++;
272 }
273}
274
275static int __kprobes kprobe_handler(struct pt_regs *regs)
276{
277 struct kprobe *p;
278 int ret = 0;
279 unsigned long *addr = (unsigned long *)
280 ((regs->psw.addr & PSW_ADDR_INSN) - 2);
281 struct kprobe_ctlblk *kcb;
282
283 /*
284 * We don't want to be preempted for the entire
285 * duration of kprobe processing
286 */
287 preempt_disable();
288 kcb = get_kprobe_ctlblk();
289
290 /* Check we're not actually recursing */
291 if (kprobe_running()) {
292 p = get_kprobe(addr);
293 if (p) {
294 if (kcb->kprobe_status == KPROBE_HIT_SS &&
295 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
296 regs->psw.mask &= ~PSW_MASK_PER;
297 regs->psw.mask |= kcb->kprobe_saved_imask;
298 goto no_kprobe;
299 }
300 /* We have reentered the kprobe_handler(), since
301 * another probe was hit while within the handler.
302		 * Here we save the original kprobe variables, then
303		 * single-step the instruction of the new probe
304		 * without calling any user handlers.
305 */
306 save_previous_kprobe(kcb);
307 set_current_kprobe(p, regs, kcb);
308 kprobes_inc_nmissed_count(p);
309 prepare_singlestep(p, regs);
310 kcb->kprobe_status = KPROBE_REENTER;
311 return 1;
312 } else {
313 p = __get_cpu_var(current_kprobe);
314 if (p->break_handler && p->break_handler(p, regs)) {
315 goto ss_probe;
316 }
317 }
318 goto no_kprobe;
319 }
320
321 p = get_kprobe(addr);
322 if (!p) {
323 if (*addr != BREAKPOINT_INSTRUCTION) {
324 /*
325 * The breakpoint instruction was removed right
326 * after we hit it. Another cpu has removed
327 * either a probepoint or a debugger breakpoint
328 * at this address. In either case, no further
329 * handling of this interrupt is appropriate.
330 *
331 */
332 ret = 1;
333 }
334 /* Not one of ours: let kernel handle it */
335 goto no_kprobe;
336 }
337
338 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
339 set_current_kprobe(p, regs, kcb);
340 if (p->pre_handler && p->pre_handler(p, regs))
341 /* handler has already set things up, so skip ss setup */
342 return 1;
343
344ss_probe:
345 prepare_singlestep(p, regs);
346 kcb->kprobe_status = KPROBE_HIT_SS;
347 return 1;
348
349no_kprobe:
350 preempt_enable_no_resched();
351 return ret;
352}
353
354/*
355 * Function return probe trampoline:
356 * - init_kprobes() establishes a probepoint here
357 * - When the probed function returns, this probe
358 * causes the handlers to fire
359 */
360void __kprobes kretprobe_trampoline_holder(void)
361{
362 asm volatile(".global kretprobe_trampoline\n"
363 "kretprobe_trampoline: bcr 0,0\n");
364}
365
366/*
367 * Called when the probe at kretprobe trampoline is hit
368 */
369int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
370{
371 struct kretprobe_instance *ri = NULL;
372 struct hlist_head *head;
373 struct hlist_node *node, *tmp;
374 unsigned long flags, orig_ret_address = 0;
375 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
376
377 spin_lock_irqsave(&kretprobe_lock, flags);
378 head = kretprobe_inst_table_head(current);
379
380 /*
381 * It is possible to have multiple instances associated with a given
382	 * task either because multiple functions in the call path
383	 * have a return probe installed on them, and/or more than one
384	 * return probe was registered for a target function.
385 *
386 * We can handle this because:
387 * - instances are always inserted at the head of the list
388 * - when multiple return probes are registered for the same
389 * function, the first instance's ret_addr will point to the
390 * real return address, and all the rest will point to
391 * kretprobe_trampoline
392 */
393 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
394 if (ri->task != current)
395 /* another task is sharing our hash bucket */
396 continue;
397
398 if (ri->rp && ri->rp->handler)
399 ri->rp->handler(ri, regs);
400
401 orig_ret_address = (unsigned long)ri->ret_addr;
402 recycle_rp_inst(ri);
403
404 if (orig_ret_address != trampoline_address) {
405 /*
406 * This is the real return address. Any other
407 * instances associated with this task are for
408 * other calls deeper on the call stack
409 */
410 break;
411 }
412 }
413 BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
414 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
415
416 reset_current_kprobe();
417 spin_unlock_irqrestore(&kretprobe_lock, flags);
418 preempt_enable_no_resched();
419
420 /*
421 * By returning a non-zero value, we are telling
422 * kprobe_handler() that we don't want the post_handler
423 * to run (and have re-enabled preemption)
424 */
425 return 1;
426}
427
428/*
429 * Called after single-stepping. p->addr is the address of the
430 * instruction whose first byte has been replaced by the "breakpoint"
431 * instruction. To avoid the SMP problems that can occur when we
432 * temporarily put back the original opcode to single-step, we
433 * single-stepped a copy of the instruction. The address of this
434 * copy is p->ainsn.insn.
435 */
436static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
437{
438 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
439
440 regs->psw.addr &= PSW_ADDR_INSN;
441
442 if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
443 regs->psw.addr = (unsigned long)p->addr +
444 ((unsigned long)regs->psw.addr -
445 (unsigned long)p->ainsn.insn);
446
447 if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
448 if ((unsigned long)regs->psw.addr -
449 (unsigned long)p->ainsn.insn == p->ainsn.ilen)
450 regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
451
452 if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
453 regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
454 (regs->gprs[p->ainsn.reg] -
455 (unsigned long)p->ainsn.insn))
456 | PSW_ADDR_AMODE;
457
458 regs->psw.addr |= PSW_ADDR_AMODE;
459 /* turn off PER mode */
460 regs->psw.mask &= ~PSW_MASK_PER;
461 /* Restore the original per control regs */
462 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
463 regs->psw.mask |= kcb->kprobe_saved_imask;
464}
465
466static int __kprobes post_kprobe_handler(struct pt_regs *regs)
467{
468 struct kprobe *cur = kprobe_running();
469 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
470
471 if (!cur)
472 return 0;
473
474 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
475 kcb->kprobe_status = KPROBE_HIT_SSDONE;
476 cur->post_handler(cur, regs, 0);
477 }
478
479 resume_execution(cur, regs);
480
481	/* Restore the original saved kprobes variables and continue. */
482 if (kcb->kprobe_status == KPROBE_REENTER) {
483 restore_previous_kprobe(kcb);
484 goto out;
485 }
486 reset_current_kprobe();
487out:
488 preempt_enable_no_resched();
489
490 /*
491	 * If somebody else is single-stepping across a probe point, the PSW
492	 * mask will have PER set; in that case continue the remaining
493	 * processing of do_single_step, as if this were not a probe hit.
494 */
495 if (regs->psw.mask & PSW_MASK_PER) {
496 return 0;
497 }
498
499 return 1;
500}
501
502static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
503{
504 struct kprobe *cur = kprobe_running();
505 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
506 const struct exception_table_entry *entry;
507
508	switch (kcb->kprobe_status) {
509 case KPROBE_SWAP_INST:
510 /* We are here because the instruction replacement failed */
511 return 0;
512 case KPROBE_HIT_SS:
513 case KPROBE_REENTER:
514 /*
515 * We are here because the instruction being single
516 * stepped caused a page fault. We reset the current
517		 * kprobe, point the PSW back to the probe address
518 * and allow the page fault handler to continue as a
519 * normal page fault.
520 */
521 regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
522 regs->psw.mask &= ~PSW_MASK_PER;
523 regs->psw.mask |= kcb->kprobe_saved_imask;
524 if (kcb->kprobe_status == KPROBE_REENTER)
525 restore_previous_kprobe(kcb);
526 else
527 reset_current_kprobe();
528 preempt_enable_no_resched();
529 break;
530 case KPROBE_HIT_ACTIVE:
531 case KPROBE_HIT_SSDONE:
532 /*
533		 * We increment the nmissed count for accounting;
534		 * the npre/npostfault counts could also be used to account
535		 * for these specific fault cases.
536 */
537 kprobes_inc_nmissed_count(cur);
538
539 /*
540		 * We come here because instructions in the pre/post
541		 * handler caused the page fault; this can happen
542		 * if the handler tries to access user space via
543 * copy_from_user(), get_user() etc. Let the
544 * user-specified handler try to fix it first.
545 */
546 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
547 return 1;
548
549 /*
550 * In case the user-specified fault handler returned
551 * zero, try to fix up.
552 */
553 entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
554 if (entry) {
555 regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
556 return 1;
557 }
558
559 /*
560		 * fixup_exception() could not handle it;
561		 * let do_page_fault() fix it.
562 */
563 break;
564 default:
565 break;
566 }
567 return 0;
568}
569
570/*
571 * Wrapper routine for handling exceptions.
572 */
573int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
574 unsigned long val, void *data)
575{
576 struct die_args *args = (struct die_args *)data;
577 int ret = NOTIFY_DONE;
578
579 switch (val) {
580 case DIE_BPT:
581 if (kprobe_handler(args->regs))
582 ret = NOTIFY_STOP;
583 break;
584 case DIE_SSTEP:
585 if (post_kprobe_handler(args->regs))
586 ret = NOTIFY_STOP;
587 break;
588 case DIE_TRAP:
589 case DIE_PAGE_FAULT:
590 /* kprobe_running() needs smp_processor_id() */
591 preempt_disable();
592 if (kprobe_running() &&
593 kprobe_fault_handler(args->regs, args->trapnr))
594 ret = NOTIFY_STOP;
595 preempt_enable();
596 break;
597 default:
598 break;
599 }
600 return ret;
601}
602
603int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
604{
605 struct jprobe *jp = container_of(p, struct jprobe, kp);
606 unsigned long addr;
607 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
608
609 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
610
611 /* setup return addr to the jprobe handler routine */
612 regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
613
614 /* r14 is the function return address */
615 kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
616 /* r15 is the stack pointer */
617 kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
618 addr = (unsigned long)kcb->jprobe_saved_r15;
619
620 memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
621 MIN_STACK_SIZE(addr));
622 return 1;
623}
624
625void __kprobes jprobe_return(void)
626{
627 asm volatile(".word 0x0002");
628}
629
630void __kprobes jprobe_return_end(void)
631{
632 asm volatile("bcr 0,0");
633}
634
635int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
636{
637 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
638 unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
639
640 /* Put the regs back */
641 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
642 /* put the stack back */
643 memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
644 MIN_STACK_SIZE(stack_addr));
645 preempt_enable_no_resched();
646 return 1;
647}
648
649static struct kprobe trampoline_p = {
650 .addr = (kprobe_opcode_t *) & kretprobe_trampoline,
651 .pre_handler = trampoline_probe_handler
652};
653
654int __init arch_init_kprobes(void)
655{
656 return register_kprobe(&trampoline_p);
657}
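With the arch support above in place, probes are registered the same way as on other architectures. A minimal, hedged sketch of a module that probes one of its own functions (all names below are made up for illustration; error handling is reduced to the essentials):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Dummy target so the example is self-contained. */
static noinline void example_target(void)
{
	printk(KERN_INFO "example_target() called\n");
}

/* Pre-handler: runs before the probed instruction is single-stepped. */
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe at %p hit, psw.addr=0x%lx\n",
	       p->addr, regs->psw.addr);
	return 0;	/* continue with normal single-step processing */
}

static struct kprobe example_kp = {
	.pre_handler = example_pre,
};

static int __init example_init(void)
{
	int rc;

	example_kp.addr = (kprobe_opcode_t *) example_target;
	rc = register_kprobe(&example_kp);
	if (rc)
		return rc;
	example_target();	/* trigger the probe once */
	return 0;
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");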
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 658e5ac484f9..4562cdbce8eb 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -8,13 +8,30 @@
8 8
9#include <asm/lowcore.h> 9#include <asm/lowcore.h>
10 10
11 .globl do_reipl 11 .globl do_reipl_asm
12do_reipl: basr %r13,0 12do_reipl_asm: basr %r13,0
13.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) 13.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
14.Lpg1: lctl %c6,%c6,.Lall-.Lpg0(%r13) 14
15 stctl %c0,%c0,.Lctlsave-.Lpg0(%r13) 15 # switch off lowcore protection
16 ni .Lctlsave-.Lpg0(%r13),0xef 16
17 lctl %c0,%c0,.Lctlsave-.Lpg0(%r13) 17.Lpg1: stctl %c0,%c0,.Lctlsave1-.Lpg0(%r13)
18 stctl %c0,%c0,.Lctlsave2-.Lpg0(%r13)
19 ni .Lctlsave1-.Lpg0(%r13),0xef
20 lctl %c0,%c0,.Lctlsave1-.Lpg0(%r13)
21
22 # do store status of all registers
23
24 stm %r0,%r15,__LC_GPREGS_SAVE_AREA
25 stctl %c0,%c15,__LC_CREGS_SAVE_AREA
26 mvc __LC_CREGS_SAVE_AREA(4),.Lctlsave2-.Lpg0(%r13)
27 stam %a0,%a15,__LC_AREGS_SAVE_AREA
28 stpx __LC_PREFIX_SAVE_AREA
29 stckc .Lclkcmp-.Lpg0(%r13)
30 mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
31 stpt __LC_CPU_TIMER_SAVE_AREA
32 st %r13, __LC_PSW_SAVE_AREA+4
33
34 lctl %c6,%c6,.Lall-.Lpg0(%r13)
18 lr %r1,%r2 35 lr %r1,%r2
19 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) 36 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
20 stsch .Lschib-.Lpg0(%r13) 37 stsch .Lschib-.Lpg0(%r13)
@@ -46,9 +63,11 @@ do_reipl: basr %r13,0
46.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) 63.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
47 lpsw .Ldispsw-.Lpg0(%r13) 64 lpsw .Ldispsw-.Lpg0(%r13)
48 .align 8 65 .align 8
66.Lclkcmp: .quad 0x0000000000000000
49.Lall: .long 0xff000000 67.Lall: .long 0xff000000
50.Lnull: .long 0x00000000 68.Lnull: .long 0x00000000
51.Lctlsave: .long 0x00000000 69.Lctlsave1: .long 0x00000000
70.Lctlsave2: .long 0x00000000
52 .align 8 71 .align 8
53.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 72.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
54.Lpcnew: .long 0x00080000,0x80000000+.Lecs 73.Lpcnew: .long 0x00080000,0x80000000+.Lecs
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 4d090d60f3ef..95bd1e234f63 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -8,13 +8,30 @@
8 */ 8 */
9 9
10#include <asm/lowcore.h> 10#include <asm/lowcore.h>
11 .globl do_reipl 11 .globl do_reipl_asm
12do_reipl: basr %r13,0 12do_reipl_asm: basr %r13,0
13.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) 13
14 # do store status of all registers
15
16.Lpg0: stg %r1,.Lregsave-.Lpg0(%r13)
17 lghi %r1,0x1000
18 stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
19 lg %r0,.Lregsave-.Lpg0(%r13)
20 stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
21 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
22 stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
23 stpx __LC_PREFIX_SAVE_AREA-0x1000(%r1)
24 stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
25 stckc .Lclkcmp-.Lpg0(%r13)
26 mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
27 stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
28 stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
29
30 lpswe .Lnewpsw-.Lpg0(%r13)
14.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13) 31.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13)
15 stctg %c0,%c0,.Lctlsave-.Lpg0(%r13) 32 stctg %c0,%c0,.Lregsave-.Lpg0(%r13)
16 ni .Lctlsave+4-.Lpg0(%r13),0xef 33 ni .Lregsave+4-.Lpg0(%r13),0xef
17 lctlg %c0,%c0,.Lctlsave-.Lpg0(%r13) 34 lctlg %c0,%c0,.Lregsave-.Lpg0(%r13)
18 lgr %r1,%r2 35 lgr %r1,%r2
19 mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) 36 mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
20 stsch .Lschib-.Lpg0(%r13) 37 stsch .Lschib-.Lpg0(%r13)
@@ -50,8 +67,9 @@ do_reipl: basr %r13,0
50 st %r14,.Ldispsw+12-.Lpg0(%r13) 67 st %r14,.Ldispsw+12-.Lpg0(%r13)
51 lpswe .Ldispsw-.Lpg0(%r13) 68 lpswe .Ldispsw-.Lpg0(%r13)
52 .align 8 69 .align 8
70.Lclkcmp: .quad 0x0000000000000000
53.Lall: .quad 0x00000000ff000000 71.Lall: .quad 0x00000000ff000000
54.Lctlsave: .quad 0x0000000000000000 72.Lregsave: .quad 0x0000000000000000
55.Lnull: .long 0x0000000000000000 73.Lnull: .long 0x0000000000000000
56 .align 16 74 .align 16
57/* 75/*
@@ -92,5 +110,3 @@ do_reipl: basr %r13,0
92 .long 0x00000000,0x00000000 110 .long 0x00000000,0x00000000
93 .long 0x00000000,0x00000000 111 .long 0x00000000,0x00000000
94 112
95
96
diff --git a/arch/s390/kernel/reipl_diag.c b/arch/s390/kernel/reipl_diag.c
deleted file mode 100644
index 1f33951ba439..000000000000
--- a/arch/s390/kernel/reipl_diag.c
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * This file contains the implementation of the
3 * Linux re-IPL support
4 *
5 * (C) Copyright IBM Corp. 2005
6 *
7 * Author(s): Volker Sameske (sameske@de.ibm.com)
8 *
9 */
10
11#include <linux/kernel.h>
12
13static unsigned int reipl_diag_rc1;
14static unsigned int reipl_diag_rc2;
15
16/*
17 * re-IPL the system using the last used IPL parameters
18 */
19void reipl_diag(void)
20{
21 asm volatile (
22 " la %%r4,0\n"
23 " la %%r5,0\n"
24 " diag %%r4,%2,0x308\n"
25 "0:\n"
26 " st %%r4,%0\n"
27 " st %%r5,%1\n"
28 ".section __ex_table,\"a\"\n"
29#ifdef CONFIG_64BIT
30 " .align 8\n"
31 " .quad 0b, 0b\n"
32#else
33 " .align 4\n"
34 " .long 0b, 0b\n"
35#endif
36 ".previous\n"
37 : "=m" (reipl_diag_rc1), "=m" (reipl_diag_rc2)
38 : "d" (3) : "cc", "4", "5" );
39}
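The deleted wrapper issued diagnose 0x308 subcode 3 directly; the new code in ipl.c drives the same diagnose through a small helper (see the DIAG308_* calls in reipl_probe() above). A hedged sketch of what such a helper can look like; the register conventions are inferred from the removed code and the DIAG308_STORE usage earlier in this patch, not quoted from the new implementation:

/* Sketch: issue diagnose 0x308 with a subcode and a parameter-block address. */
static int diag308_sketch(unsigned long subcode, void *addr)
{
	register unsigned long _addr asm("0") = (unsigned long) addr;
	register unsigned long _rc asm("1") = 0;

	asm volatile(
		"	diag	%0,%2,0x308\n"
		"0:\n"
		EX_TABLE(0b,0b)	/* if the diagnose faults, fall through with _rc unchanged */
		: "+d" (_addr), "+d" (_rc)
		: "d" (subcode)
		: "cc", "memory");
	return _rc;
}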
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index c73a45467fa4..9f19e833a562 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -25,12 +25,6 @@ EXPORT_SYMBOL(_oi_bitmap);
25EXPORT_SYMBOL(_ni_bitmap); 25EXPORT_SYMBOL(_ni_bitmap);
26EXPORT_SYMBOL(_zb_findmap); 26EXPORT_SYMBOL(_zb_findmap);
27EXPORT_SYMBOL(_sb_findmap); 27EXPORT_SYMBOL(_sb_findmap);
28EXPORT_SYMBOL(__copy_from_user_asm);
29EXPORT_SYMBOL(__copy_to_user_asm);
30EXPORT_SYMBOL(__copy_in_user_asm);
31EXPORT_SYMBOL(__clear_user_asm);
32EXPORT_SYMBOL(__strncpy_from_user_asm);
33EXPORT_SYMBOL(__strnlen_user_asm);
34EXPORT_SYMBOL(diag10); 28EXPORT_SYMBOL(diag10);
35 29
36/* 30/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c902f059c7aa..e3d9325f6022 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -37,6 +37,7 @@
37#include <linux/kernel_stat.h> 37#include <linux/kernel_stat.h>
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/notifier.h> 39#include <linux/notifier.h>
40#include <linux/pfn.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <asm/system.h> 43#include <asm/system.h>
@@ -50,6 +51,12 @@
50#include <asm/sections.h> 51#include <asm/sections.h>
51 52
52/* 53/*
54 * User copy operations.
55 */
56struct uaccess_ops uaccess;
57EXPORT_SYMBOL_GPL(uaccess);
58
59/*
53 * Machine setup.. 60 * Machine setup..
54 */ 61 */
55unsigned int console_mode = 0; 62unsigned int console_mode = 0;
@@ -284,16 +291,9 @@ void (*_machine_power_off)(void) = machine_power_off_smp;
284/* 291/*
285 * Reboot, halt and power_off routines for non SMP. 292 * Reboot, halt and power_off routines for non SMP.
286 */ 293 */
287extern void reipl(unsigned long devno);
288extern void reipl_diag(void);
289static void do_machine_restart_nonsmp(char * __unused) 294static void do_machine_restart_nonsmp(char * __unused)
290{ 295{
291 reipl_diag(); 296 do_reipl();
292
293 if (MACHINE_IS_VM)
294 cpcmd ("IPL", NULL, 0, NULL);
295 else
296 reipl (0x10000 | S390_lowcore.ipl_device);
297} 297}
298 298
299static void do_machine_halt_nonsmp(void) 299static void do_machine_halt_nonsmp(void)
@@ -501,13 +501,47 @@ setup_memory(void)
501 * partially used pages are not usable - thus 501 * partially used pages are not usable - thus
502 * we are rounding upwards: 502 * we are rounding upwards:
503 */ 503 */
504 start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; 504 start_pfn = PFN_UP(__pa(&_end));
505 end_pfn = max_pfn = memory_end >> PAGE_SHIFT; 505 end_pfn = max_pfn = PFN_DOWN(memory_end);
506 506
507 /* Initialize storage key for kernel pages */ 507 /* Initialize storage key for kernel pages */
508 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) 508 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
509 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); 509 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
510 510
511#ifdef CONFIG_BLK_DEV_INITRD
512 /*
513	 * Move the initrd in case the bitmap of the bootmem allocator
514 * would overwrite it.
515 */
516
517 if (INITRD_START && INITRD_SIZE) {
518 unsigned long bmap_size;
519 unsigned long start;
520
521 bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
522 bmap_size = PFN_PHYS(bmap_size);
523
524 if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
525 start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
526
527 if (start + INITRD_SIZE > memory_end) {
528 printk("initrd extends beyond end of memory "
529 "(0x%08lx > 0x%08lx)\n"
530 "disabling initrd\n",
531 start + INITRD_SIZE, memory_end);
532 INITRD_START = INITRD_SIZE = 0;
533 } else {
534 printk("Moving initrd (0x%08lx -> 0x%08lx, "
535 "size: %ld)\n",
536 INITRD_START, start, INITRD_SIZE);
537 memmove((void *) start, (void *) INITRD_START,
538 INITRD_SIZE);
539 INITRD_START = start;
540 }
541 }
542 }
543#endif
544
511 /* 545 /*
512 * Initialize the boot-time allocator (with low memory only): 546 * Initialize the boot-time allocator (with low memory only):
513 */ 547 */
@@ -559,7 +593,7 @@ setup_memory(void)
559 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); 593 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
560 594
561#ifdef CONFIG_BLK_DEV_INITRD 595#ifdef CONFIG_BLK_DEV_INITRD
562 if (INITRD_START) { 596 if (INITRD_START && INITRD_SIZE) {
563 if (INITRD_START + INITRD_SIZE <= memory_end) { 597 if (INITRD_START + INITRD_SIZE <= memory_end) {
564 reserve_bootmem(INITRD_START, INITRD_SIZE); 598 reserve_bootmem(INITRD_START, INITRD_SIZE);
565 initrd_start = INITRD_START; 599 initrd_start = INITRD_START;
@@ -613,6 +647,11 @@ setup_arch(char **cmdline_p)
613 647
614 memory_end = memory_size; 648 memory_end = memory_size;
615 649
650 if (MACHINE_HAS_MVCOS)
651 memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
652 else
653 memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
654
616 parse_early_param(); 655 parse_early_param();
617 656
618#ifndef CONFIG_64BIT 657#ifndef CONFIG_64BIT
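Together with the EXPORT_SYMBOL_GPL(uaccess) hunk above, this is the runtime selection point for the new user-copy backends: setup_arch() copies either the mvcos-based or the standard ops table into the global uaccess structure, and the inline uaccess helpers dispatch through it. A rough sketch of the shape of that table; the field list is an assumption based on the functions added in uaccess_mvcos.c below, while the real definition lives in include/asm-s390/uaccess.h and also carries *_small, string and futex variants:

/* Sketch only, not the actual header contents. */
struct uaccess_ops_sketch {
	size_t (*copy_from_user)(size_t n, const void __user *from, void *to);
	size_t (*copy_to_user)(size_t n, void __user *to, const void *from);
	size_t (*copy_in_user)(size_t n, void __user *to,
			       const void __user *from);
	size_t (*clear_user)(size_t n, void __user *to);
};

extern struct uaccess_ops_sketch uaccess_sketch;	/* set once at boot */

static inline unsigned long
copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
{
	/* every caller goes through the function pointer chosen in setup_arch() */
	return uaccess_sketch.copy_to_user(n, to, from);
}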
@@ -720,214 +759,3 @@ struct seq_operations cpuinfo_op = {
720 .show = show_cpuinfo, 759 .show = show_cpuinfo,
721}; 760};
722 761
723#define DEFINE_IPL_ATTR(_name, _format, _value) \
724static ssize_t ipl_##_name##_show(struct subsystem *subsys, \
725 char *page) \
726{ \
727 return sprintf(page, _format, _value); \
728} \
729static struct subsys_attribute ipl_##_name##_attr = \
730 __ATTR(_name, S_IRUGO, ipl_##_name##_show, NULL);
731
732DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", (unsigned long long)
733 IPL_PARMBLOCK_START->fcp.wwpn);
734DEFINE_IPL_ATTR(lun, "0x%016llx\n", (unsigned long long)
735 IPL_PARMBLOCK_START->fcp.lun);
736DEFINE_IPL_ATTR(bootprog, "%lld\n", (unsigned long long)
737 IPL_PARMBLOCK_START->fcp.bootprog);
738DEFINE_IPL_ATTR(br_lba, "%lld\n", (unsigned long long)
739 IPL_PARMBLOCK_START->fcp.br_lba);
740
741enum ipl_type_type {
742 ipl_type_unknown,
743 ipl_type_ccw,
744 ipl_type_fcp,
745};
746
747static enum ipl_type_type
748get_ipl_type(void)
749{
750 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
751
752 if (!IPL_DEVNO_VALID)
753 return ipl_type_unknown;
754 if (!IPL_PARMBLOCK_VALID)
755 return ipl_type_ccw;
756 if (ipl->hdr.header.version > IPL_MAX_SUPPORTED_VERSION)
757 return ipl_type_unknown;
758 if (ipl->fcp.pbt != IPL_TYPE_FCP)
759 return ipl_type_unknown;
760 return ipl_type_fcp;
761}
762
763static ssize_t
764ipl_type_show(struct subsystem *subsys, char *page)
765{
766 switch (get_ipl_type()) {
767 case ipl_type_ccw:
768 return sprintf(page, "ccw\n");
769 case ipl_type_fcp:
770 return sprintf(page, "fcp\n");
771 default:
772 return sprintf(page, "unknown\n");
773 }
774}
775
776static struct subsys_attribute ipl_type_attr = __ATTR_RO(ipl_type);
777
778static ssize_t
779ipl_device_show(struct subsystem *subsys, char *page)
780{
781 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
782
783 switch (get_ipl_type()) {
784 case ipl_type_ccw:
785 return sprintf(page, "0.0.%04x\n", ipl_devno);
786 case ipl_type_fcp:
787 return sprintf(page, "0.0.%04x\n", ipl->fcp.devno);
788 default:
789 return 0;
790 }
791}
792
793static struct subsys_attribute ipl_device_attr =
794 __ATTR(device, S_IRUGO, ipl_device_show, NULL);
795
796static struct attribute *ipl_fcp_attrs[] = {
797 &ipl_type_attr.attr,
798 &ipl_device_attr.attr,
799 &ipl_wwpn_attr.attr,
800 &ipl_lun_attr.attr,
801 &ipl_bootprog_attr.attr,
802 &ipl_br_lba_attr.attr,
803 NULL,
804};
805
806static struct attribute_group ipl_fcp_attr_group = {
807 .attrs = ipl_fcp_attrs,
808};
809
810static struct attribute *ipl_ccw_attrs[] = {
811 &ipl_type_attr.attr,
812 &ipl_device_attr.attr,
813 NULL,
814};
815
816static struct attribute_group ipl_ccw_attr_group = {
817 .attrs = ipl_ccw_attrs,
818};
819
820static struct attribute *ipl_unknown_attrs[] = {
821 &ipl_type_attr.attr,
822 NULL,
823};
824
825static struct attribute_group ipl_unknown_attr_group = {
826 .attrs = ipl_unknown_attrs,
827};
828
829static ssize_t
830ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
831{
832 unsigned int size = IPL_PARMBLOCK_SIZE;
833
834 if (off > size)
835 return 0;
836 if (off + count > size)
837 count = size - off;
838
839 memcpy(buf, (void *) IPL_PARMBLOCK_START + off, count);
840 return count;
841}
842
843static struct bin_attribute ipl_parameter_attr = {
844 .attr = {
845 .name = "binary_parameter",
846 .mode = S_IRUGO,
847 .owner = THIS_MODULE,
848 },
849 .size = PAGE_SIZE,
850 .read = &ipl_parameter_read,
851};
852
853static ssize_t
854ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
855{
856 unsigned int size = IPL_PARMBLOCK_START->fcp.scp_data_len;
857 void *scp_data = &IPL_PARMBLOCK_START->fcp.scp_data;
858
859 if (off > size)
860 return 0;
861 if (off + count > size)
862 count = size - off;
863
864 memcpy(buf, scp_data + off, count);
865 return count;
866}
867
868static struct bin_attribute ipl_scp_data_attr = {
869 .attr = {
870 .name = "scp_data",
871 .mode = S_IRUGO,
872 .owner = THIS_MODULE,
873 },
874 .size = PAGE_SIZE,
875 .read = &ipl_scp_data_read,
876};
877
878static decl_subsys(ipl, NULL, NULL);
879
880static int ipl_register_fcp_files(void)
881{
882 int rc;
883
884 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
885 &ipl_fcp_attr_group);
886 if (rc)
887 goto out;
888 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
889 &ipl_parameter_attr);
890 if (rc)
891 goto out_ipl_parm;
892 rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
893 &ipl_scp_data_attr);
894 if (!rc)
895 goto out;
896
897 sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
898
899out_ipl_parm:
900 sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
901out:
902 return rc;
903}
904
905static int __init
906ipl_device_sysfs_register(void) {
907 int rc;
908
909 rc = firmware_register(&ipl_subsys);
910 if (rc)
911 goto out;
912
913 switch (get_ipl_type()) {
914 case ipl_type_ccw:
915 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
916 &ipl_ccw_attr_group);
917 break;
918 case ipl_type_fcp:
919 rc = ipl_register_fcp_files();
920 break;
921 default:
922 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
923 &ipl_unknown_attr_group);
924 break;
925 }
926
927 if (rc)
928 firmware_unregister(&ipl_subsys);
929out:
930 return rc;
931}
932
933__initcall(ipl_device_sysfs_register);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index a887b686f279..642095ec7c07 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -114,29 +114,26 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
114static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) 114static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
115{ 115{
116 unsigned long old_mask = regs->psw.mask; 116 unsigned long old_mask = regs->psw.mask;
117 int err; 117 _sigregs user_sregs;
118 118
119 save_access_regs(current->thread.acrs); 119 save_access_regs(current->thread.acrs);
120 120
121 /* Copy a 'clean' PSW mask to the user to avoid leaking 121 /* Copy a 'clean' PSW mask to the user to avoid leaking
122 information about whether PER is currently on. */ 122 information about whether PER is currently on. */
123 regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); 123 regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
124 err = __copy_to_user(&sregs->regs.psw, &regs->psw, 124 memcpy(&user_sregs.regs.psw, &regs->psw, sizeof(sregs->regs.psw) +
125 sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs)); 125 sizeof(sregs->regs.gprs));
126 regs->psw.mask = old_mask; 126 regs->psw.mask = old_mask;
127 if (err != 0) 127 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
128 return err; 128 sizeof(sregs->regs.acrs));
129 err = __copy_to_user(&sregs->regs.acrs, current->thread.acrs,
130 sizeof(sregs->regs.acrs));
131 if (err != 0)
132 return err;
133 /* 129 /*
134 * We have to store the fp registers to current->thread.fp_regs 130 * We have to store the fp registers to current->thread.fp_regs
135 * to merge them with the emulated registers. 131 * to merge them with the emulated registers.
136 */ 132 */
137 save_fp_regs(&current->thread.fp_regs); 133 save_fp_regs(&current->thread.fp_regs);
138 return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs, 134 memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
139 sizeof(s390_fp_regs)); 135 sizeof(s390_fp_regs));
136 return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs));
140} 137}
141 138
142/* Returns positive number on error */ 139/* Returns positive number on error */
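The rework above replaces three partial __copy_to_user() calls with one copy of a kernel-side _sigregs staging buffer (restore_sigregs() below mirrors this with a single __copy_from_user() up front, so all fault checking happens in one place). The same pattern in isolation, as a hedged sketch with a made-up structure:

#include <linux/string.h>
#include <asm/uaccess.h>

/* Illustrative only; the struct and names are not from this patch. */
struct example_frame {
	unsigned long flags;
	unsigned long gprs[16];
};

static int save_example_frame(struct example_frame __user *uframe,
			      unsigned long flags, unsigned long *gprs)
{
	struct example_frame tmp;	/* staged in kernel memory first */

	tmp.flags = flags;
	memcpy(tmp.gprs, gprs, sizeof(tmp.gprs));
	/* one fault-checked user-space transfer instead of several partial ones */
	return __copy_to_user(uframe, &tmp, sizeof(tmp));
}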
@@ -144,27 +141,25 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
144{ 141{
145 unsigned long old_mask = regs->psw.mask; 142 unsigned long old_mask = regs->psw.mask;
146 int err; 143 int err;
144 _sigregs user_sregs;
147 145
148 /* Alwys make any pending restarted system call return -EINTR */ 146 /* Alwys make any pending restarted system call return -EINTR */
149 current_thread_info()->restart_block.fn = do_no_restart_syscall; 147 current_thread_info()->restart_block.fn = do_no_restart_syscall;
150 148
151 err = __copy_from_user(&regs->psw, &sregs->regs.psw, 149 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
152 sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
153 regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask); 150 regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask);
154 regs->psw.addr |= PSW_ADDR_AMODE; 151 regs->psw.addr |= PSW_ADDR_AMODE;
155 if (err) 152 if (err)
156 return err; 153 return err;
157 err = __copy_from_user(&current->thread.acrs, &sregs->regs.acrs, 154 memcpy(&regs->psw, &user_sregs.regs.psw, sizeof(sregs->regs.psw) +
158 sizeof(sregs->regs.acrs)); 155 sizeof(sregs->regs.gprs));
159 if (err) 156 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
160 return err; 157 sizeof(sregs->regs.acrs));
161 restore_access_regs(current->thread.acrs); 158 restore_access_regs(current->thread.acrs);
162 159
163 err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs, 160 memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
164 sizeof(s390_fp_regs)); 161 sizeof(s390_fp_regs));
165 current->thread.fp_regs.fpc &= FPC_VALID_MASK; 162 current->thread.fp_regs.fpc &= FPC_VALID_MASK;
166 if (err)
167 return err;
168 163
169 restore_fp_regs(&current->thread.fp_regs); 164 restore_fp_regs(&current->thread.fp_regs);
170 regs->trap = -1; /* disable syscall checks */ 165 regs->trap = -1; /* disable syscall checks */
@@ -457,6 +452,7 @@ void do_signal(struct pt_regs *regs)
457 case -ERESTART_RESTARTBLOCK: 452 case -ERESTART_RESTARTBLOCK:
458 regs->gprs[2] = -EINTR; 453 regs->gprs[2] = -EINTR;
459 } 454 }
455 regs->trap = -1; /* Don't deal with this again. */
460 } 456 }
461 457
462 /* Get signal to deliver. When running under ptrace, at this point 458 /* Get signal to deliver. When running under ptrace, at this point
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8e03219eea76..b2e6f4c8d382 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,9 +59,6 @@ static struct task_struct *current_set[NR_CPUS];
59extern char vmhalt_cmd[]; 59extern char vmhalt_cmd[];
60extern char vmpoff_cmd[]; 60extern char vmpoff_cmd[];
61 61
62extern void reipl(unsigned long devno);
63extern void reipl_diag(void);
64
65static void smp_ext_bitcall(int, ec_bit_sig); 62static void smp_ext_bitcall(int, ec_bit_sig);
66static void smp_ext_bitcall_others(ec_bit_sig); 63static void smp_ext_bitcall_others(ec_bit_sig);
67 64
@@ -279,12 +276,7 @@ static void do_machine_restart(void * __unused)
279 * interrupted by an external interrupt and s390irq 276 * interrupted by an external interrupt and s390irq
280 * locks are always held disabled). 277 * locks are always held disabled).
281 */ 278 */
282 reipl_diag(); 279 do_reipl();
283
284 if (MACHINE_IS_VM)
285 cpcmd ("IPL", NULL, 0, NULL);
286 else
287 reipl (0x10000 | S390_lowcore.ipl_device);
288} 280}
289 281
290void machine_restart_smp(char * __unused) 282void machine_restart_smp(char * __unused)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index bde1d1d59858..c4982c963424 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/kallsyms.h> 30#include <linux/kallsyms.h>
31#include <linux/reboot.h> 31#include <linux/reboot.h>
32#include <linux/kprobes.h>
32 33
33#include <asm/system.h> 34#include <asm/system.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -39,6 +40,7 @@
39#include <asm/s390_ext.h> 40#include <asm/s390_ext.h>
40#include <asm/lowcore.h> 41#include <asm/lowcore.h>
41#include <asm/debug.h> 42#include <asm/debug.h>
43#include <asm/kdebug.h>
42 44
43/* Called from entry.S only */ 45/* Called from entry.S only */
44extern void handle_per_exception(struct pt_regs *regs); 46extern void handle_per_exception(struct pt_regs *regs);
@@ -74,6 +76,20 @@ static int kstack_depth_to_print = 12;
74static int kstack_depth_to_print = 20; 76static int kstack_depth_to_print = 20;
75#endif /* CONFIG_64BIT */ 77#endif /* CONFIG_64BIT */
76 78
79ATOMIC_NOTIFIER_HEAD(s390die_chain);
80
81int register_die_notifier(struct notifier_block *nb)
82{
83 return atomic_notifier_chain_register(&s390die_chain, nb);
84}
85EXPORT_SYMBOL(register_die_notifier);
86
87int unregister_die_notifier(struct notifier_block *nb)
88{
89 return atomic_notifier_chain_unregister(&s390die_chain, nb);
90}
91EXPORT_SYMBOL(unregister_die_notifier);
92
77/* 93/*
78 * For show_trace we have tree different stack to consider: 94 * For show_trace we have tree different stack to consider:
79 * - the panic stack which is used if the kernel stack has overflown 95 * - the panic stack which is used if the kernel stack has overflown
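The s390die_chain introduced above is what kprobe_exceptions_notify() in the new kprobes.c attaches to; the trap and single-step paths reach it through a notify_die() helper. A sketch of such a helper along the lines of other architectures' <asm/kdebug.h>; the exact struct layout and helper name in the real header are assumptions here:

/* Sketch: package the event and run the s390 die notifier chain. */
static inline int notify_die_sketch(enum die_val val, const char *str,
				    struct pt_regs *regs, long err,
				    int trap, int sig)
{
	struct die_args args = {
		.regs	= regs,
		.str	= str,
		.err	= err,
		.trapnr	= trap,
		.signr	= sig,
	};

	return atomic_notifier_call_chain(&s390die_chain, val, &args);
}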
@@ -305,8 +321,9 @@ report_user_fault(long interruption_code, struct pt_regs *regs)
305#endif 321#endif
306} 322}
307 323
308static void inline do_trap(long interruption_code, int signr, char *str, 324static void __kprobes inline do_trap(long interruption_code, int signr,
309 struct pt_regs *regs, siginfo_t *info) 325 char *str, struct pt_regs *regs,
326 siginfo_t *info)
310{ 327{
311 /* 328 /*
312 * We got all needed information from the lowcore and can 329 * We got all needed information from the lowcore and can
@@ -315,6 +332,10 @@ static void inline do_trap(long interruption_code, int signr, char *str,
315 if (regs->psw.mask & PSW_MASK_PSTATE) 332 if (regs->psw.mask & PSW_MASK_PSTATE)
316 local_irq_enable(); 333 local_irq_enable();
317 334
335 if (notify_die(DIE_TRAP, str, regs, interruption_code,
336 interruption_code, signr) == NOTIFY_STOP)
337 return;
338
318 if (regs->psw.mask & PSW_MASK_PSTATE) { 339 if (regs->psw.mask & PSW_MASK_PSTATE) {
319 struct task_struct *tsk = current; 340 struct task_struct *tsk = current;
320 341
@@ -336,8 +357,12 @@ static inline void __user *get_check_address(struct pt_regs *regs)
336 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); 357 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
337} 358}
338 359
339void do_single_step(struct pt_regs *regs) 360void __kprobes do_single_step(struct pt_regs *regs)
340{ 361{
362 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
363 SIGTRAP) == NOTIFY_STOP){
364 return;
365 }
341 if ((current->ptrace & PT_PTRACED) != 0) 366 if ((current->ptrace & PT_PTRACED) != 0)
342 force_sig(SIGTRAP, current); 367 force_sig(SIGTRAP, current);
343} 368}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index ff5f7bb34f75..af9e69a03011 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -24,6 +24,7 @@ SECTIONS
24 *(.text) 24 *(.text)
25 SCHED_TEXT 25 SCHED_TEXT
26 LOCK_TEXT 26 LOCK_TEXT
27 KPROBES_TEXT
27 *(.fixup) 28 *(.fixup)
28 *(.gnu.warning) 29 *(.gnu.warning)
29 } = 0x0700 30 } = 0x0700
@@ -117,7 +118,7 @@ SECTIONS
117 118
118 /* Sections to be discarded */ 119 /* Sections to be discarded */
119 /DISCARD/ : { 120 /DISCARD/ : {
120 *(.exitcall.exit) 121 *(.exit.text) *(.exit.data) *(.exitcall.exit)
121 } 122 }
122 123
123 /* Stabs debugging sections. */ 124 /* Stabs debugging sections. */
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e05d087a6eae..c42ffedfdb49 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,6 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7lib-y += delay.o string.o 7lib-y += delay.o string.o uaccess_std.o
8lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o) 8lib-$(CONFIG_64BIT) += uaccess_mvcos.o
9lib-$(CONFIG_SMP) += spinlock.o 9lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S
deleted file mode 100644
index 837275284d9f..000000000000
--- a/arch/s390/lib/uaccess.S
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * arch/s390/lib/uaccess.S
3 * __copy_{from|to}_user functions.
4 *
5 * s390
6 * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * These functions have standard call interface
10 */
11
12#include <linux/errno.h>
13#include <asm/lowcore.h>
14#include <asm/asm-offsets.h>
15
16 .text
17 .align 4
18 .globl __copy_from_user_asm
19 # %r2 = to, %r3 = n, %r4 = from
20__copy_from_user_asm:
21 slr %r0,%r0
220: mvcp 0(%r3,%r2),0(%r4),%r0
23 jnz 1f
24 slr %r2,%r2
25 br %r14
261: la %r2,256(%r2)
27 la %r4,256(%r4)
28 ahi %r3,-256
292: mvcp 0(%r3,%r2),0(%r4),%r0
30 jnz 1b
313: slr %r2,%r2
32 br %r14
334: lhi %r0,-4096
34 lr %r5,%r4
35 slr %r5,%r0
36 nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
37 slr %r5,%r4 # %r5 = #bytes to next user page boundary
38 clr %r3,%r5 # copy crosses next page boundary ?
39 jnh 6f # no, the current page faulted
40 # move with the reduced length which is < 256
415: mvcp 0(%r5,%r2),0(%r4),%r0
42 slr %r3,%r5
436: lr %r2,%r3
44 br %r14
45 .section __ex_table,"a"
46 .long 0b,4b
47 .long 2b,4b
48 .long 5b,6b
49 .previous
50
51 .align 4
52 .text
53 .globl __copy_to_user_asm
54 # %r2 = from, %r3 = n, %r4 = to
55__copy_to_user_asm:
56 slr %r0,%r0
570: mvcs 0(%r3,%r4),0(%r2),%r0
58 jnz 1f
59 slr %r2,%r2
60 br %r14
611: la %r2,256(%r2)
62 la %r4,256(%r4)
63 ahi %r3,-256
642: mvcs 0(%r3,%r4),0(%r2),%r0
65 jnz 1b
663: slr %r2,%r2
67 br %r14
684: lhi %r0,-4096
69 lr %r5,%r4
70 slr %r5,%r0
71 nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
72 slr %r5,%r4 # %r5 = #bytes to next user page boundary
73 clr %r3,%r5 # copy crosses next page boundary ?
74 jnh 6f # no, the current page faulted
75 # move with the reduced length which is < 256
765: mvcs 0(%r5,%r4),0(%r2),%r0
77 slr %r3,%r5
786: lr %r2,%r3
79 br %r14
80 .section __ex_table,"a"
81 .long 0b,4b
82 .long 2b,4b
83 .long 5b,6b
84 .previous
85
86 .align 4
87 .text
88 .globl __copy_in_user_asm
89 # %r2 = from, %r3 = n, %r4 = to
90__copy_in_user_asm:
91 ahi %r3,-1
92 jo 6f
93 sacf 256
94 bras %r1,4f
950: ahi %r3,257
961: mvc 0(1,%r4),0(%r2)
97 la %r2,1(%r2)
98 la %r4,1(%r4)
99 ahi %r3,-1
100 jnz 1b
1012: lr %r2,%r3
102 br %r14
1033: mvc 0(256,%r4),0(%r2)
104 la %r2,256(%r2)
105 la %r4,256(%r4)
1064: ahi %r3,-256
107 jnm 3b
1085: ex %r3,4(%r1)
109 sacf 0
1106: slr %r2,%r2
111 br %r14
112 .section __ex_table,"a"
113 .long 1b,2b
114 .long 3b,0b
115 .long 5b,0b
116 .previous
117
118 .align 4
119 .text
120 .globl __clear_user_asm
121 # %r2 = to, %r3 = n
122__clear_user_asm:
123 bras %r5,0f
124 .long empty_zero_page
1250: l %r5,0(%r5)
126 slr %r0,%r0
1271: mvcs 0(%r3,%r2),0(%r5),%r0
128 jnz 2f
129 slr %r2,%r2
130 br %r14
1312: la %r2,256(%r2)
132 ahi %r3,-256
1333: mvcs 0(%r3,%r2),0(%r5),%r0
134 jnz 2b
1354: slr %r2,%r2
136 br %r14
1375: lhi %r0,-4096
138 lr %r4,%r2
139 slr %r4,%r0
140 nr %r4,%r0 # %r4 = (%r2 + 4096) & -4096
141 slr %r4,%r2 # %r4 = #bytes to next user page boundary
142 clr %r3,%r4 # clear crosses next page boundary ?
143 jnh 7f # no, the current page faulted
144 # clear with the reduced length which is < 256
1456: mvcs 0(%r4,%r2),0(%r5),%r0
146 slr %r3,%r4
1477: lr %r2,%r3
148 br %r14
149 .section __ex_table,"a"
150 .long 1b,5b
151 .long 3b,5b
152 .long 6b,7b
153 .previous
154
155 .align 4
156 .text
157 .globl __strncpy_from_user_asm
158 # %r2 = count, %r3 = dst, %r4 = src
159__strncpy_from_user_asm:
160 lhi %r0,0
161 lr %r1,%r4
162 la %r4,0(%r4) # clear high order bit from %r4
163 la %r2,0(%r2,%r4) # %r2 points to first byte after string
164 sacf 256
1650: srst %r2,%r1
166 jo 0b
167 sacf 0
168 lr %r1,%r2
169 jh 1f # \0 found in string ?
170 ahi %r1,1 # include \0 in copy
1711: slr %r1,%r4 # %r1 = copy length (without \0)
172 slr %r2,%r4 # %r2 = return length (including \0)
1732: mvcp 0(%r1,%r3),0(%r4),%r0
174 jnz 3f
175 br %r14
1763: la %r3,256(%r3)
177 la %r4,256(%r4)
178 ahi %r1,-256
179 mvcp 0(%r1,%r3),0(%r4),%r0
180 jnz 3b
181 br %r14
1824: sacf 0
183 lhi %r2,-EFAULT
184 br %r14
185 .section __ex_table,"a"
186 .long 0b,4b
187 .previous
188
189 .align 4
190 .text
191 .globl __strnlen_user_asm
192 # %r2 = count, %r3 = src
193__strnlen_user_asm:
194 lhi %r0,0
195 lr %r1,%r3
196 la %r3,0(%r3) # clear high order bit from %r4
197 la %r2,0(%r2,%r3) # %r2 points to first byte after string
198 sacf 256
1990: srst %r2,%r1
200 jo 0b
201 sacf 0
202 ahi %r2,1 # strnlen_user result includes the \0
203 # or return count+1 if \0 not found
204 slr %r2,%r3
205 br %r14
2062: sacf 0
207 slr %r2,%r2 # return 0 on exception
208 br %r14
209 .section __ex_table,"a"
210 .long 0b,2b
211 .previous
diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S
deleted file mode 100644
index 1f755be22f92..000000000000
--- a/arch/s390/lib/uaccess64.S
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * arch/s390x/lib/uaccess.S
3 * __copy_{from|to}_user functions.
4 *
5 * s390
6 * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * These functions have standard call interface
10 */
11
12#include <linux/errno.h>
13#include <asm/lowcore.h>
14#include <asm/asm-offsets.h>
15
16 .text
17 .align 4
18 .globl __copy_from_user_asm
19 # %r2 = to, %r3 = n, %r4 = from
20__copy_from_user_asm:
21 slgr %r0,%r0
220: mvcp 0(%r3,%r2),0(%r4),%r0
23 jnz 1f
24 slgr %r2,%r2
25 br %r14
261: la %r2,256(%r2)
27 la %r4,256(%r4)
28 aghi %r3,-256
292: mvcp 0(%r3,%r2),0(%r4),%r0
30 jnz 1b
313: slgr %r2,%r2
32 br %r14
334: lghi %r0,-4096
34 lgr %r5,%r4
35 slgr %r5,%r0
36 ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
37 slgr %r5,%r4 # %r5 = #bytes to next user page boundary
38 clgr %r3,%r5 # copy crosses next page boundary ?
39 jnh 6f # no, the current page faulted
40 # move with the reduced length which is < 256
415: mvcp 0(%r5,%r2),0(%r4),%r0
42 slgr %r3,%r5
436: lgr %r2,%r3
44 br %r14
45 .section __ex_table,"a"
46 .quad 0b,4b
47 .quad 2b,4b
48 .quad 5b,6b
49 .previous
50
51 .align 4
52 .text
53 .globl __copy_to_user_asm
54 # %r2 = from, %r3 = n, %r4 = to
55__copy_to_user_asm:
56 slgr %r0,%r0
570: mvcs 0(%r3,%r4),0(%r2),%r0
58 jnz 1f
59 slgr %r2,%r2
60 br %r14
611: la %r2,256(%r2)
62 la %r4,256(%r4)
63 aghi %r3,-256
642: mvcs 0(%r3,%r4),0(%r2),%r0
65 jnz 1b
663: slgr %r2,%r2
67 br %r14
684: lghi %r0,-4096
69 lgr %r5,%r4
70 slgr %r5,%r0
71 ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
72 slgr %r5,%r4 # %r5 = #bytes to next user page boundary
73 clgr %r3,%r5 # copy crosses next page boundary ?
74 jnh 6f # no, the current page faulted
75 # move with the reduced length which is < 256
765: mvcs 0(%r5,%r4),0(%r2),%r0
77 slgr %r3,%r5
786: lgr %r2,%r3
79 br %r14
80 .section __ex_table,"a"
81 .quad 0b,4b
82 .quad 2b,4b
83 .quad 5b,6b
84 .previous
85
86 .align 4
87 .text
88 .globl __copy_in_user_asm
89 # %r2 = from, %r3 = n, %r4 = to
90__copy_in_user_asm:
91 aghi %r3,-1
92 jo 6f
93 sacf 256
94 bras %r1,4f
950: aghi %r3,257
961: mvc 0(1,%r4),0(%r2)
97 la %r2,1(%r2)
98 la %r4,1(%r4)
99 aghi %r3,-1
100 jnz 1b
1012: lgr %r2,%r3
102 br %r14
1033: mvc 0(256,%r4),0(%r2)
104 la %r2,256(%r2)
105 la %r4,256(%r4)
1064: aghi %r3,-256
107 jnm 3b
1085: ex %r3,4(%r1)
109 sacf 0
1106: slgr %r2,%r2
111 br 14
112 .section __ex_table,"a"
113 .quad 1b,2b
114 .quad 3b,0b
115 .quad 5b,0b
116 .previous
117
118 .align 4
119 .text
120 .globl __clear_user_asm
121 # %r2 = to, %r3 = n
122__clear_user_asm:
123 slgr %r0,%r0
124 larl %r5,empty_zero_page
1251: mvcs 0(%r3,%r2),0(%r5),%r0
126 jnz 2f
127 slgr %r2,%r2
128 br %r14
1292: la %r2,256(%r2)
130 aghi %r3,-256
1313: mvcs 0(%r3,%r2),0(%r5),%r0
132 jnz 2b
1334: slgr %r2,%r2
134 br %r14
1355: lghi %r0,-4096
136 lgr %r4,%r2
137 slgr %r4,%r0
138 ngr %r4,%r0 # %r4 = (%r2 + 4096) & -4096
139 slgr %r4,%r2 # %r4 = #bytes to next user page boundary
140 clgr %r3,%r4 # clear crosses next page boundary ?
141 jnh 7f # no, the current page faulted
142 # clear with the reduced length which is < 256
1436: mvcs 0(%r4,%r2),0(%r5),%r0
144 slgr %r3,%r4
1457: lgr %r2,%r3
146 br %r14
147 .section __ex_table,"a"
148 .quad 1b,5b
149 .quad 3b,5b
150 .quad 6b,7b
151 .previous
152
153 .align 4
154 .text
155 .globl __strncpy_from_user_asm
156 # %r2 = count, %r3 = dst, %r4 = src
157__strncpy_from_user_asm:
158 lghi %r0,0
159 lgr %r1,%r4
160 la %r2,0(%r2,%r4) # %r2 points to first byte after string
161 sacf 256
1620: srst %r2,%r1
163 jo 0b
164 sacf 0
165 lgr %r1,%r2
166 jh 1f # \0 found in string ?
167 aghi %r1,1 # include \0 in copy
1681: slgr %r1,%r4 # %r1 = copy length (without \0)
169 slgr %r2,%r4 # %r2 = return length (including \0)
1702: mvcp 0(%r1,%r3),0(%r4),%r0
171 jnz 3f
172 br %r14
1733: la %r3,256(%r3)
174 la %r4,256(%r4)
175 aghi %r1,-256
176 mvcp 0(%r1,%r3),0(%r4),%r0
177 jnz 3b
178 br %r14
1794: sacf 0
180 lghi %r2,-EFAULT
181 br %r14
182 .section __ex_table,"a"
183 .quad 0b,4b
184 .previous
185
186 .align 4
187 .text
188 .globl __strnlen_user_asm
189 # %r2 = count, %r3 = src
190__strnlen_user_asm:
191 lghi %r0,0
192 lgr %r1,%r3
193 la %r2,0(%r2,%r3) # %r2 points to first byte after string
194 sacf 256
1950: srst %r2,%r1
196 jo 0b
197 sacf 0
198 aghi %r2,1 # strnlen_user result includes the \0
199 # or return count+1 if \0 not found
200 slgr %r2,%r3
201 br %r14
2022: sacf 0
203 slgr %r2,%r2 # return 0 on exception
204 br %r14
205 .section __ex_table,"a"
206 .quad 0b,2b
207 .previous
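
The fault handlers registered in the __ex_table sections above do not give up immediately: on a fault they clamp the remaining length to the bytes left before the next 4KB user page and retry once, so the caller gets back an accurate count of uncopied bytes. A minimal user-space sketch of that boundary arithmetic follows; the names are illustrative and nothing here is taken from the kernel sources.

/* Hedged stand-alone sketch of the page-boundary clamp used by the fault
 * paths above; PAGE_SIZE_4K and the variable names are illustrative only. */
#include <stdio.h>

#define PAGE_SIZE_4K 4096UL

/* Bytes from addr up to (but not including) the next 4K boundary, i.e.
 * ((addr + 4096) & -4096) - addr, as computed with lghi/slgr/ngr above. */
static unsigned long bytes_to_page_boundary(unsigned long addr)
{
        return ((addr + PAGE_SIZE_4K) & ~(PAGE_SIZE_4K - 1)) - addr;
}

int main(void)
{
        unsigned long addr = 0x12345f00UL;
        unsigned long len = 512;
        unsigned long chunk = bytes_to_page_boundary(addr);

        if (len > chunk)        /* copy crosses the boundary: retry with chunk */
                printf("retry with reduced length %lu\n", chunk);
        else                    /* the current page itself faulted: give up */
                printf("fault inside current page, %lu bytes left\n", len);
        return 0;
}
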
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
new file mode 100644
index 000000000000..86c96d6c191a
--- /dev/null
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -0,0 +1,156 @@
1/*
2 * arch/s390/lib/uaccess_mvcos.c
3 *
4 * Optimized user space access functions based on mvcos.
5 *
6 * Copyright (C) IBM Corp. 2006
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Gerald Schaefer (gerald.schaefer@de.ibm.com)
9 */
10
11#include <linux/errno.h>
12#include <linux/mm.h>
13#include <asm/uaccess.h>
14#include <asm/futex.h>
15
16#ifndef __s390x__
17#define AHI "ahi"
18#define ALR "alr"
19#define CLR "clr"
20#define LHI "lhi"
21#define SLR "slr"
22#else
23#define AHI "aghi"
24#define ALR "algr"
25#define CLR "clgr"
26#define LHI "lghi"
27#define SLR "slgr"
28#endif
29
30size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
31{
32 register unsigned long reg0 asm("0") = 0x81UL;
33 unsigned long tmp1, tmp2;
34
35 tmp1 = -4096UL;
36 asm volatile(
37 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
38 " jz 4f\n"
39 "1:"ALR" %0,%3\n"
40 " "SLR" %1,%3\n"
41 " "SLR" %2,%3\n"
42 " j 0b\n"
43 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
44 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
45 " "SLR" %4,%1\n"
46 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
47 " jnh 5f\n"
48 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
49 " "SLR" %0,%4\n"
50 " j 5f\n"
51 "4:"SLR" %0,%0\n"
52 "5: \n"
53 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
54 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
55 : "d" (reg0) : "cc", "memory");
56 return size;
57}
58
59size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
60{
61 register unsigned long reg0 asm("0") = 0x810000UL;
62 unsigned long tmp1, tmp2;
63
64 tmp1 = -4096UL;
65 asm volatile(
66 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
67 " jz 4f\n"
68 "1:"ALR" %0,%3\n"
69 " "SLR" %1,%3\n"
70 " "SLR" %2,%3\n"
71 " j 0b\n"
72 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
73 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
74 " "SLR" %4,%1\n"
75 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
76 " jnh 5f\n"
77 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
78 " "SLR" %0,%4\n"
79 " j 5f\n"
80 "4:"SLR" %0,%0\n"
81 "5: \n"
82 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
83 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
84 : "d" (reg0) : "cc", "memory");
85 return size;
86}
87
88size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
89{
90 register unsigned long reg0 asm("0") = 0x810081UL;
91 unsigned long tmp1, tmp2;
92
93 tmp1 = -4096UL;
94 /* FIXME: copy with reduced length. */
95 asm volatile(
96 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
97 " jz 2f\n"
98 "1:"ALR" %0,%3\n"
99 " "SLR" %1,%3\n"
100 " "SLR" %2,%3\n"
101 " j 0b\n"
102 "2:"SLR" %0,%0\n"
103 "3: \n"
104 EX_TABLE(0b,3b)
105 : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
106 : "d" (reg0) : "cc", "memory");
107 return size;
108}
109
110size_t clear_user_mvcos(size_t size, void __user *to)
111{
112 register unsigned long reg0 asm("0") = 0x810000UL;
113 unsigned long tmp1, tmp2;
114
115 tmp1 = -4096UL;
116 asm volatile(
117 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
118 " jz 4f\n"
119 "1:"ALR" %0,%2\n"
120 " "SLR" %1,%2\n"
121 " j 0b\n"
122 "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
123 " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
124 " "SLR" %3,%1\n"
125 " "CLR" %0,%3\n" /* copy crosses next page boundary? */
126 " jnh 5f\n"
127 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
128 " "SLR" %0,%3\n"
129 " j 5f\n"
130 "4:"SLR" %0,%0\n"
131 "5: \n"
132 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
133 : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
134 : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
135 return size;
136}
137
138extern size_t copy_from_user_std_small(size_t, const void __user *, void *);
139extern size_t copy_to_user_std_small(size_t, void __user *, const void *);
140extern size_t strnlen_user_std(size_t, const char __user *);
141extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
142extern int futex_atomic_op(int, int __user *, int, int *);
143extern int futex_atomic_cmpxchg(int __user *, int, int);
144
145struct uaccess_ops uaccess_mvcos = {
146 .copy_from_user = copy_from_user_mvcos,
147 .copy_from_user_small = copy_from_user_std_small,
148 .copy_to_user = copy_to_user_mvcos,
149 .copy_to_user_small = copy_to_user_std_small,
150 .copy_in_user = copy_in_user_mvcos,
151 .clear_user = clear_user_mvcos,
152 .strnlen_user = strnlen_user_std,
153 .strncpy_from_user = strncpy_from_user_std,
154 .futex_atomic_op = futex_atomic_op,
155 .futex_atomic_cmpxchg = futex_atomic_cmpxchg,
156};
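
copy_from_user_mvcos() and the other routines above are never called directly; they are published through the uaccess_mvcos ops table so the rest of the kernel can be switched between the mvcos and the mvcp/mvcs implementations at run time. A hedged, user-space sketch of that dispatch pattern follows; the selection logic and every name in it are illustrative assumptions, not code from this patch.

/* Hedged sketch of dispatching through an ops table like uaccess_mvcos.
 * Reduced to one callback; ops_std, ops_mvcos and uaccess_ptr are
 * illustrative names. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct uaccess_ops_sketch {
        size_t (*copy_from_user)(size_t n, const void *from, void *to);
};

/* Stand-ins for copy_from_user_std()/copy_from_user_mvcos(); both return
 * the number of bytes NOT copied, 0 meaning complete success. */
static size_t copy_std(size_t n, const void *from, void *to)
{
        memcpy(to, from, n);
        return 0;
}

static size_t copy_mvcos(size_t n, const void *from, void *to)
{
        memcpy(to, from, n);
        return 0;
}

static struct uaccess_ops_sketch ops_std = { .copy_from_user = copy_std };
static struct uaccess_ops_sketch ops_mvcos = { .copy_from_user = copy_mvcos };
static struct uaccess_ops_sketch *uaccess_ptr = &ops_std;

int main(void)
{
        int machine_has_mvcos = 1;      /* would be probed once at boot */
        char src[] = "data", dst[8];

        if (machine_has_mvcos)          /* pick the faster variant */
                uaccess_ptr = &ops_mvcos;
        /* Callers go through the table; note the (n, from, to) argument order. */
        if (uaccess_ptr->copy_from_user(sizeof(src), src, dst) == 0)
                printf("%s\n", dst);
        return 0;
}
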
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
new file mode 100644
index 000000000000..9a4d4a29ea79
--- /dev/null
+++ b/arch/s390/lib/uaccess_std.c
@@ -0,0 +1,340 @@
1/*
2 * arch/s390/lib/uaccess_std.c
3 *
4 * Standard user space access functions based on mvcp/mvcs and doing
5 * interesting things in the secondary space mode.
6 *
7 * Copyright (C) IBM Corp. 2006
8 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
9 * Gerald Schaefer (gerald.schaefer@de.ibm.com)
10 */
11
12#include <linux/errno.h>
13#include <linux/mm.h>
14#include <asm/uaccess.h>
15#include <asm/futex.h>
16
17#ifndef __s390x__
18#define AHI "ahi"
19#define ALR "alr"
20#define CLR "clr"
21#define LHI "lhi"
22#define SLR "slr"
23#else
24#define AHI "aghi"
25#define ALR "algr"
26#define CLR "clgr"
27#define LHI "lghi"
28#define SLR "slgr"
29#endif
30
31size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
32{
33 unsigned long tmp1, tmp2;
34
35 tmp1 = -256UL;
36 asm volatile(
37 "0: mvcp 0(%0,%2),0(%1),%3\n"
38 " jz 5f\n"
39 "1:"ALR" %0,%3\n"
40 " la %1,256(%1)\n"
41 " la %2,256(%2)\n"
42 "2: mvcp 0(%0,%2),0(%1),%3\n"
43 " jnz 1b\n"
44 " j 5f\n"
45 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
46 " "LHI" %3,-4096\n"
47 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
48 " "SLR" %4,%1\n"
49 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
50 " jnh 6f\n"
51 "4: mvcp 0(%4,%2),0(%1),%3\n"
52 " "SLR" %0,%4\n"
53 " j 6f\n"
54 "5:"SLR" %0,%0\n"
55 "6: \n"
56 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
57 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
58 : : "cc", "memory");
59 return size;
60}
61
62size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
63{
64 unsigned long tmp1, tmp2;
65
66 tmp1 = 0UL;
67 asm volatile(
68 "0: mvcp 0(%0,%2),0(%1),%3\n"
69 " "SLR" %0,%0\n"
70 " j 3f\n"
71 "1: la %4,255(%1)\n" /* %4 = ptr + 255 */
72 " "LHI" %3,-4096\n"
73 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
74 " "SLR" %4,%1\n"
75 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
76 " jnh 3f\n"
77 "2: mvcp 0(%4,%2),0(%1),%3\n"
78 " "SLR" %0,%4\n"
79 "3:\n"
80 EX_TABLE(0b,1b) EX_TABLE(2b,3b)
81 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
82 : : "cc", "memory");
83 return size;
84}
85
86size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
87{
88 unsigned long tmp1, tmp2;
89
90 tmp1 = -256UL;
91 asm volatile(
92 "0: mvcs 0(%0,%1),0(%2),%3\n"
93 " jz 5f\n"
94 "1:"ALR" %0,%3\n"
95 " la %1,256(%1)\n"
96 " la %2,256(%2)\n"
97 "2: mvcs 0(%0,%1),0(%2),%3\n"
98 " jnz 1b\n"
99 " j 5f\n"
100 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
101 " "LHI" %3,-4096\n"
102 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
103 " "SLR" %4,%1\n"
104 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
105 " jnh 6f\n"
106 "4: mvcs 0(%4,%1),0(%2),%3\n"
107 " "SLR" %0,%4\n"
108 " j 6f\n"
109 "5:"SLR" %0,%0\n"
110 "6: \n"
111 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
112 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
113 : : "cc", "memory");
114 return size;
115}
116
117size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
118{
119 unsigned long tmp1, tmp2;
120
121 tmp1 = 0UL;
122 asm volatile(
123 "0: mvcs 0(%0,%1),0(%2),%3\n"
124 " "SLR" %0,%0\n"
125 " j 3f\n"
126 "1: la %4,255(%1)\n" /* ptr + 255 */
127 " "LHI" %3,-4096\n"
128 " nr %4,%3\n" /* (ptr + 255) & -4096UL */
129 " "SLR" %4,%1\n"
130 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
131 " jnh 3f\n"
132 "2: mvcs 0(%4,%1),0(%2),%3\n"
133 " "SLR" %0,%4\n"
134 "3:\n"
135 EX_TABLE(0b,1b) EX_TABLE(2b,3b)
136 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
137 : : "cc", "memory");
138 return size;
139}
140
141size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
142{
143 unsigned long tmp1;
144
145 asm volatile(
146 " "AHI" %0,-1\n"
147 " jo 5f\n"
148 " sacf 256\n"
149 " bras %3,3f\n"
150 "0:"AHI" %0,257\n"
151 "1: mvc 0(1,%1),0(%2)\n"
152 " la %1,1(%1)\n"
153 " la %2,1(%2)\n"
154 " "AHI" %0,-1\n"
155 " jnz 1b\n"
156 " j 5f\n"
157 "2: mvc 0(256,%1),0(%2)\n"
158 " la %1,256(%1)\n"
159 " la %2,256(%2)\n"
160 "3:"AHI" %0,-256\n"
161 " jnm 2b\n"
162 "4: ex %0,1b-0b(%3)\n"
163 " sacf 0\n"
164 "5: "SLR" %0,%0\n"
165 "6:\n"
166 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
167 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
168 : : "cc", "memory");
169 return size;
170}
171
172size_t clear_user_std(size_t size, void __user *to)
173{
174 unsigned long tmp1, tmp2;
175
176 asm volatile(
177 " "AHI" %0,-1\n"
178 " jo 5f\n"
179 " sacf 256\n"
180 " bras %3,3f\n"
181 " xc 0(1,%1),0(%1)\n"
182 "0:"AHI" %0,257\n"
183 " la %2,255(%1)\n" /* %2 = ptr + 255 */
184 " srl %2,12\n"
185 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
186 " "SLR" %2,%1\n"
187 " "CLR" %0,%2\n" /* clear crosses next page boundary? */
188 " jnh 5f\n"
189 " "AHI" %2,-1\n"
190 "1: ex %2,0(%3)\n"
191 " "AHI" %2,1\n"
192 " "SLR" %0,%2\n"
193 " j 5f\n"
194 "2: xc 0(256,%1),0(%1)\n"
195 " la %1,256(%1)\n"
196 "3:"AHI" %0,-256\n"
197 " jnm 2b\n"
198 "4: ex %0,0(%3)\n"
199 " sacf 0\n"
200 "5: "SLR" %0,%0\n"
201 "6:\n"
202 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
203 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
204 : : "cc", "memory");
205 return size;
206}
207
208size_t strnlen_user_std(size_t size, const char __user *src)
209{
210 register unsigned long reg0 asm("0") = 0UL;
211 unsigned long tmp1, tmp2;
212
213 asm volatile(
214 " la %2,0(%1)\n"
215 " la %3,0(%0,%1)\n"
216 " "SLR" %0,%0\n"
217 " sacf 256\n"
218 "0: srst %3,%2\n"
219 " jo 0b\n"
220 " la %0,1(%3)\n" /* strnlen_user results includes \0 */
221 " "SLR" %0,%1\n"
222 "1: sacf 0\n"
223 EX_TABLE(0b,1b)
224 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
225 : "d" (reg0) : "cc", "memory");
226 return size;
227}
228
229size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
230{
231 register unsigned long reg0 asm("0") = 0UL;
232 unsigned long tmp1, tmp2;
233
234 asm volatile(
235 " la %3,0(%1)\n"
236 " la %4,0(%0,%1)\n"
237 " sacf 256\n"
238 "0: srst %4,%3\n"
239 " jo 0b\n"
240 " sacf 0\n"
241 " la %0,0(%4)\n"
242 " jh 1f\n" /* found \0 in string ? */
243 " "AHI" %4,1\n" /* include \0 in copy */
244 "1:"SLR" %0,%1\n" /* %0 = return length (without \0) */
245 " "SLR" %4,%1\n" /* %4 = copy length (including \0) */
246 "2: mvcp 0(%4,%2),0(%1),%5\n"
247 " jz 9f\n"
248 "3:"AHI" %4,-256\n"
249 " la %1,256(%1)\n"
250 " la %2,256(%2)\n"
251 "4: mvcp 0(%4,%2),0(%1),%5\n"
252 " jnz 3b\n"
253 " j 9f\n"
254 "7: sacf 0\n"
255 "8:"LHI" %0,%6\n"
256 "9:\n"
257 EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
258 : "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
259 : "d" (reg0), "K" (-EFAULT) : "cc", "memory");
260 return size;
261}
262
263#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
264 asm volatile( \
265 " sacf 256\n" \
266 "0: l %1,0(%6)\n" \
267 "1:"insn \
268 "2: cs %1,%2,0(%6)\n" \
269 "3: jl 1b\n" \
270 " lhi %0,0\n" \
271 "4: sacf 0\n" \
272 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
273 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
274 "=m" (*uaddr) \
275 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
276 "m" (*uaddr) : "cc");
277
278int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
279{
280 int oldval = 0, newval, ret;
281
282 inc_preempt_count();
283
284 switch (op) {
285 case FUTEX_OP_SET:
286 __futex_atomic_op("lr %2,%5\n",
287 ret, oldval, newval, uaddr, oparg);
288 break;
289 case FUTEX_OP_ADD:
290 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
291 ret, oldval, newval, uaddr, oparg);
292 break;
293 case FUTEX_OP_OR:
294 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
295 ret, oldval, newval, uaddr, oparg);
296 break;
297 case FUTEX_OP_ANDN:
298 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
299 ret, oldval, newval, uaddr, oparg);
300 break;
301 case FUTEX_OP_XOR:
302 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
303 ret, oldval, newval, uaddr, oparg);
304 break;
305 default:
306 ret = -ENOSYS;
307 }
308 dec_preempt_count();
309 *old = oldval;
310 return ret;
311}
312
313int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
314{
315 int ret;
316
317 asm volatile(
318 " sacf 256\n"
319 " cs %1,%4,0(%5)\n"
320 "0: lr %0,%1\n"
321 "1: sacf 0\n"
322 EX_TABLE(0b,1b)
323 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
324 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
325 : "cc", "memory" );
326 return ret;
327}
328
329struct uaccess_ops uaccess_std = {
330 .copy_from_user = copy_from_user_std,
331 .copy_from_user_small = copy_from_user_std_small,
332 .copy_to_user = copy_to_user_std,
333 .copy_to_user_small = copy_to_user_std_small,
334 .copy_in_user = copy_in_user_std,
335 .clear_user = clear_user_std,
336 .strnlen_user = strnlen_user_std,
337 .strncpy_from_user = strncpy_from_user_std,
338 .futex_atomic_op = futex_atomic_op,
339 .futex_atomic_cmpxchg = futex_atomic_cmpxchg,
340};
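
The __futex_atomic_op() macro above encodes the classic load / compute / compare-and-swap retry loop: cs only stores the new value if the word still holds the old one, and jl loops back when another CPU won the race. A hedged user-space sketch of the same loop, using the GCC __sync builtin in place of cs; the function and variable names are illustrative.

/* Hedged sketch of the FUTEX_OP_ADD case of futex_atomic_op() above. */
#include <stdio.h>

static int futex_op_add_sketch(int *uaddr, int oparg, int *old)
{
        int oldval, newval, cur;

        cur = *uaddr;
        do {
                oldval = cur;
                newval = oldval + oparg;        /* FUTEX_OP_ADD */
                /* Atomic compare-and-swap, standing in for "cs". */
                cur = __sync_val_compare_and_swap(uaddr, oldval, newval);
        } while (cur != oldval);                /* lost the race: retry */
        *old = oldval;
        return 0;
}

int main(void)
{
        int val = 40, old;

        futex_op_add_sketch(&val, 2, &old);
        printf("old=%d new=%d\n", old, val);    /* old=40 new=42 */
        return 0;
}
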
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index ceea51cff03b..786a44dba5bf 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -53,22 +53,6 @@ static void cmm_timer_fn(unsigned long);
53static void cmm_set_timer(void); 53static void cmm_set_timer(void);
54 54
55static long 55static long
56cmm_strtoul(const char *cp, char **endp)
57{
58 unsigned int base = 10;
59
60 if (*cp == '0') {
61 base = 8;
62 cp++;
63 if ((*cp == 'x' || *cp == 'X') && isxdigit(cp[1])) {
64 base = 16;
65 cp++;
66 }
67 }
68 return simple_strtoul(cp, endp, base);
69}
70
71static long
72cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list) 56cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
73{ 57{
74 struct cmm_page_array *pa; 58 struct cmm_page_array *pa;
@@ -276,7 +260,7 @@ cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
276 return -EFAULT; 260 return -EFAULT;
277 buf[sizeof(buf) - 1] = '\0'; 261 buf[sizeof(buf) - 1] = '\0';
278 cmm_skip_blanks(buf, &p); 262 cmm_skip_blanks(buf, &p);
279 pages = cmm_strtoul(p, &p); 263 pages = simple_strtoul(p, &p, 0);
280 if (ctl == &cmm_table[0]) 264 if (ctl == &cmm_table[0])
281 cmm_set_pages(pages); 265 cmm_set_pages(pages);
282 else 266 else
@@ -317,9 +301,9 @@ cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
317 return -EFAULT; 301 return -EFAULT;
318 buf[sizeof(buf) - 1] = '\0'; 302 buf[sizeof(buf) - 1] = '\0';
319 cmm_skip_blanks(buf, &p); 303 cmm_skip_blanks(buf, &p);
320 pages = cmm_strtoul(p, &p); 304 pages = simple_strtoul(p, &p, 0);
321 cmm_skip_blanks(p, &p); 305 cmm_skip_blanks(p, &p);
322 seconds = cmm_strtoul(p, &p); 306 seconds = simple_strtoul(p, &p, 0);
323 cmm_set_timeout(pages, seconds); 307 cmm_set_timeout(pages, seconds);
324 } else { 308 } else {
325 len = sprintf(buf, "%ld %ld\n", 309 len = sprintf(buf, "%ld %ld\n",
@@ -382,24 +366,24 @@ cmm_smsg_target(char *from, char *msg)
382 if (strncmp(msg, "SHRINK", 6) == 0) { 366 if (strncmp(msg, "SHRINK", 6) == 0) {
383 if (!cmm_skip_blanks(msg + 6, &msg)) 367 if (!cmm_skip_blanks(msg + 6, &msg))
384 return; 368 return;
385 pages = cmm_strtoul(msg, &msg); 369 pages = simple_strtoul(msg, &msg, 0);
386 cmm_skip_blanks(msg, &msg); 370 cmm_skip_blanks(msg, &msg);
387 if (*msg == '\0') 371 if (*msg == '\0')
388 cmm_set_pages(pages); 372 cmm_set_pages(pages);
389 } else if (strncmp(msg, "RELEASE", 7) == 0) { 373 } else if (strncmp(msg, "RELEASE", 7) == 0) {
390 if (!cmm_skip_blanks(msg + 7, &msg)) 374 if (!cmm_skip_blanks(msg + 7, &msg))
391 return; 375 return;
392 pages = cmm_strtoul(msg, &msg); 376 pages = simple_strtoul(msg, &msg, 0);
393 cmm_skip_blanks(msg, &msg); 377 cmm_skip_blanks(msg, &msg);
394 if (*msg == '\0') 378 if (*msg == '\0')
395 cmm_add_timed_pages(pages); 379 cmm_add_timed_pages(pages);
396 } else if (strncmp(msg, "REUSE", 5) == 0) { 380 } else if (strncmp(msg, "REUSE", 5) == 0) {
397 if (!cmm_skip_blanks(msg + 5, &msg)) 381 if (!cmm_skip_blanks(msg + 5, &msg))
398 return; 382 return;
399 pages = cmm_strtoul(msg, &msg); 383 pages = simple_strtoul(msg, &msg, 0);
400 if (!cmm_skip_blanks(msg, &msg)) 384 if (!cmm_skip_blanks(msg, &msg))
401 return; 385 return;
402 seconds = cmm_strtoul(msg, &msg); 386 seconds = simple_strtoul(msg, &msg, 0);
403 cmm_skip_blanks(msg, &msg); 387 cmm_skip_blanks(msg, &msg);
404 if (*msg == '\0') 388 if (*msg == '\0')
405 cmm_set_timeout(pages, seconds); 389 cmm_set_timeout(pages, seconds);
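
The cmm_strtoul() helper removed above only reimplemented automatic base detection, which simple_strtoul() already performs when given base 0: a leading 0x selects hex, a leading 0 selects octal, anything else is decimal. A hedged user-space demonstration with the C library strtoul(), which follows the same base-0 convention.

/* Hedged demo of base-0 parsing; all three inputs yield 42. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char *end;

        printf("%lu\n", strtoul("42",   &end, 0));      /* decimal: 42 */
        printf("%lu\n", strtoul("0x2a", &end, 0));      /* hex:     42 */
        printf("%lu\n", strtoul("052",  &end, 0));      /* octal:   42 */
        return 0;
}
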
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7cd82575813d..44f0cda7e72e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -25,10 +25,12 @@
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/hardirq.h> 27#include <linux/hardirq.h>
28#include <linux/kprobes.h>
28 29
29#include <asm/system.h> 30#include <asm/system.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/kdebug.h>
32 34
33#ifndef CONFIG_64BIT 35#ifndef CONFIG_64BIT
34#define __FAIL_ADDR_MASK 0x7ffff000 36#define __FAIL_ADDR_MASK 0x7ffff000
@@ -48,6 +50,38 @@ extern int sysctl_userprocess_debug;
48 50
49extern void die(const char *,struct pt_regs *,long); 51extern void die(const char *,struct pt_regs *,long);
50 52
53#ifdef CONFIG_KPROBES
54ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
55int register_page_fault_notifier(struct notifier_block *nb)
56{
57 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
58}
59
60int unregister_page_fault_notifier(struct notifier_block *nb)
61{
62 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
63}
64
65static inline int notify_page_fault(enum die_val val, const char *str,
66 struct pt_regs *regs, long err, int trap, int sig)
67{
68 struct die_args args = {
69 .regs = regs,
70 .str = str,
71 .err = err,
72 .trapnr = trap,
73 .signr = sig
74 };
75 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
76}
77#else
78static inline int notify_page_fault(enum die_val val, const char *str,
79 struct pt_regs *regs, long err, int trap, int sig)
80{
81 return NOTIFY_DONE;
82}
83#endif
84
51extern spinlock_t timerlist_lock; 85extern spinlock_t timerlist_lock;
52 86
53/* 87/*
@@ -159,7 +193,7 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
159 * 11 Page translation -> Not present (nullification) 193 * 11 Page translation -> Not present (nullification)
160 * 3b Region third trans. -> Not present (nullification) 194 * 3b Region third trans. -> Not present (nullification)
161 */ 195 */
162static inline void 196static inline void __kprobes
163do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) 197do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
164{ 198{
165 struct task_struct *tsk; 199 struct task_struct *tsk;
@@ -173,6 +207,10 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
173 tsk = current; 207 tsk = current;
174 mm = tsk->mm; 208 mm = tsk->mm;
175 209
210 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
211 SIGSEGV) == NOTIFY_STOP)
212 return;
213
176 /* 214 /*
177 * Check for low-address protection. This needs to be treated 215 * Check for low-address protection. This needs to be treated
178 * as a special case because the translation exception code 216 * as a special case because the translation exception code
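
register_page_fault_notifier() above gives kprobes (and, in principle, other debugging aids) a hook that runs before do_exception() does any real work; a handler that returns NOTIFY_STOP claims the fault. A hedged sketch of a client module follows; the callback and module names are illustrative, and the declarations are assumed to live in asm/kdebug.h as on other architectures.

/* Hedged kernel-module sketch of a page-fault notifier client. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Called for every s390 page fault before do_exception() handles it. */
static int sketch_pfault_cb(struct notifier_block *nb, unsigned long val,
                            void *data)
{
        struct die_args *args = data;

        if (val == DIE_PAGE_FAULT && args->trapnr == 14) {
                /* args->regs describes the faulting context; returning
                 * NOTIFY_STOP here would make do_exception() bail out. */
        }
        return NOTIFY_DONE;     /* let normal fault handling continue */
}

static struct notifier_block sketch_pfault_nb = {
        .notifier_call = sketch_pfault_cb,
};

static int __init sketch_init(void)
{
        return register_page_fault_notifier(&sketch_pfault_nb);
}

static void __exit sketch_exit(void)
{
        unregister_page_fault_notifier(&sketch_pfault_nb);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
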
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6e6b6de77770..cfd9b8f7a523 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -108,16 +108,23 @@ void __init paging_init(void)
108 unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; 108 unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
109 static const int ssm_mask = 0x04000000L; 109 static const int ssm_mask = 0x04000000L;
110 unsigned long ro_start_pfn, ro_end_pfn; 110 unsigned long ro_start_pfn, ro_end_pfn;
111 unsigned long zones_size[MAX_NR_ZONES];
111 112
112 ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata); 113 ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
113 ro_end_pfn = PFN_UP((unsigned long)&__end_rodata); 114 ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
114 115
116 memset(zones_size, 0, sizeof(zones_size));
117 zones_size[ZONE_DMA] = max_low_pfn;
118 free_area_init_node(0, &contig_page_data, zones_size,
119 __pa(PAGE_OFFSET) >> PAGE_SHIFT,
120 zholes_size);
121
115 /* unmap whole virtual address space */ 122 /* unmap whole virtual address space */
116 123
117 pg_dir = swapper_pg_dir; 124 pg_dir = swapper_pg_dir;
118 125
119 for (i=0;i<KERNEL_PGD_PTRS;i++) 126 for (i = 0; i < PTRS_PER_PGD; i++)
120 pmd_clear((pmd_t*)pg_dir++); 127 pmd_clear((pmd_t *) pg_dir++);
121 128
122 /* 129 /*
123 * map whole physical memory to virtual memory (identity mapping) 130 * map whole physical memory to virtual memory (identity mapping)
@@ -131,10 +138,7 @@ void __init paging_init(void)
131 */ 138 */
132 pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); 139 pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
133 140
134 pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table)); 141 pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
135 pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024));
136 pg_dir->pgd2 = (_PAGE_TABLE | (__pa(pg_table)+2048));
137 pg_dir->pgd3 = (_PAGE_TABLE | (__pa(pg_table)+3072));
138 pg_dir++; 142 pg_dir++;
139 143
140 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) { 144 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
@@ -143,8 +147,8 @@ void __init paging_init(void)
143 else 147 else
144 pte = pfn_pte(pfn, PAGE_KERNEL); 148 pte = pfn_pte(pfn, PAGE_KERNEL);
145 if (pfn >= max_low_pfn) 149 if (pfn >= max_low_pfn)
146 pte_clear(&init_mm, 0, &pte); 150 pte_val(pte) = _PAGE_TYPE_EMPTY;
147 set_pte(pg_table, pte); 151 set_pte(pg_table, pte);
148 pfn++; 152 pfn++;
149 } 153 }
150 } 154 }
@@ -159,16 +163,6 @@ void __init paging_init(void)
159 : : "m" (pgdir_k), "m" (ssm_mask)); 163 : : "m" (pgdir_k), "m" (ssm_mask));
160 164
161 local_flush_tlb(); 165 local_flush_tlb();
162
163 {
164 unsigned long zones_size[MAX_NR_ZONES];
165
166 memset(zones_size, 0, sizeof(zones_size));
167 zones_size[ZONE_DMA] = max_low_pfn;
168 free_area_init_node(0, &contig_page_data, zones_size,
169 __pa(PAGE_OFFSET) >> PAGE_SHIFT,
170 zholes_size);
171 }
172 return; 166 return;
173} 167}
174 168
@@ -236,10 +230,8 @@ void __init paging_init(void)
236 pte = pfn_pte(pfn, __pgprot(_PAGE_RO)); 230 pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
237 else 231 else
238 pte = pfn_pte(pfn, PAGE_KERNEL); 232 pte = pfn_pte(pfn, PAGE_KERNEL);
239 if (pfn >= max_low_pfn) { 233 if (pfn >= max_low_pfn)
240 pte_clear(&init_mm, 0, &pte); 234 pte_val(pte) = _PAGE_TYPE_EMPTY;
241 continue;
242 }
243 set_pte(pt_dir, pte); 235 set_pte(pt_dir, pte);
244 pfn++; 236 pfn++;
245 } 237 }
diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
index 0c85e9d6a448..7080b413ddc9 100644
--- a/drivers/base/hypervisor.c
+++ b/drivers/base/hypervisor.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * hypervisor.c - /sys/hypervisor subsystem. 2 * hypervisor.c - /sys/hypervisor subsystem.
3 * 3 *
4 * This file is released under the GPLv2 4 * Copyright (C) IBM Corp. 2006
5 * 5 *
6 * This file is released under the GPLv2
6 */ 7 */
7 8
8#include <linux/kobject.h> 9#include <linux/kobject.h>
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index 4d36208ff8de..ae89b9b88743 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -213,17 +213,35 @@ config MONREADER
213 help 213 help
214 Character device driver for reading z/VM monitor service records 214 Character device driver for reading z/VM monitor service records
215 215
216config MONWRITER
217 tristate "API for writing z/VM monitor service records"
218 default "m"
219 help
220 Character device driver for writing z/VM monitor service records
221
216endmenu 222endmenu
217 223
218menu "Cryptographic devices" 224menu "Cryptographic devices"
219 225
220config Z90CRYPT 226config ZCRYPT
221 tristate "Support for PCI-attached cryptographic adapters" 227 tristate "Support for PCI-attached cryptographic adapters"
222 default "m" 228 select ZCRYPT_MONOLITHIC if ZCRYPT="y"
223 help 229 default "m"
230 help
224 Select this option if you want to use a PCI-attached cryptographic 231 Select this option if you want to use a PCI-attached cryptographic
225 adapter like the PCI Cryptographic Accelerator (PCICA) or the PCI 232 adapter like:
226 Cryptographic Coprocessor (PCICC). This option is also available 233 + PCI Cryptographic Accelerator (PCICA)
227 as a module called z90crypt.ko. 234 + PCI Cryptographic Coprocessor (PCICC)
235 + PCI-X Cryptographic Coprocessor (PCIXCC)
236 + Crypto Express2 Coprocessor (CEX2C)
237 + Crypto Express2 Accelerator (CEX2A)
238
239config ZCRYPT_MONOLITHIC
240 bool "Monolithic zcrypt module"
241 depends on ZCRYPT="m"
242 help
243 Select this option if you want to have a single module z90crypt.ko
244 that contains all parts of the crypto device driver (ap bus,
245 request router and all the card drivers).
228 246
229endmenu 247endmenu
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 25c1ef6dfd44..d0647d116eaa 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -184,7 +184,7 @@ dasd_state_known_to_basic(struct dasd_device * device)
184 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, 184 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
185 8 * sizeof (long)); 185 8 * sizeof (long));
186 debug_register_view(device->debug_area, &debug_sprintf_view); 186 debug_register_view(device->debug_area, &debug_sprintf_view);
187 debug_set_level(device->debug_area, DBF_EMERG); 187 debug_set_level(device->debug_area, DBF_WARNING);
188 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 188 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
189 189
190 device->state = DASD_STATE_BASIC; 190 device->state = DASD_STATE_BASIC;
@@ -893,7 +893,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
893 893
894 device = (struct dasd_device *) cqr->device; 894 device = (struct dasd_device *) cqr->device;
895 if (device == NULL || 895 if (device == NULL ||
896 device != dasd_device_from_cdev(cdev) || 896 device != dasd_device_from_cdev_locked(cdev) ||
897 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 897 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
898 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 898 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
899 cdev->dev.bus_id); 899 cdev->dev.bus_id);
@@ -970,7 +970,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
970 /* first of all check for state change pending interrupt */ 970 /* first of all check for state change pending interrupt */
971 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 971 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
972 if ((irb->scsw.dstat & mask) == mask) { 972 if ((irb->scsw.dstat & mask) == mask) {
973 device = dasd_device_from_cdev(cdev); 973 device = dasd_device_from_cdev_locked(cdev);
974 if (!IS_ERR(device)) { 974 if (!IS_ERR(device)) {
975 dasd_handle_state_change_pending(device); 975 dasd_handle_state_change_pending(device);
976 dasd_put_device(device); 976 dasd_put_device(device);
@@ -2169,7 +2169,7 @@ dasd_init(void)
2169 goto failed; 2169 goto failed;
2170 } 2170 }
2171 debug_register_view(dasd_debug_area, &debug_sprintf_view); 2171 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2172 debug_set_level(dasd_debug_area, DBF_EMERG); 2172 debug_set_level(dasd_debug_area, DBF_WARNING);
2173 2173
2174 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 2174 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2175 2175
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 9af02c79ce8a..91cf971f0652 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -258,8 +258,12 @@ dasd_parse_keyword( char *parsestring ) {
258 return residual_str; 258 return residual_str;
259 } 259 }
260 if (strncmp("nopav", parsestring, length) == 0) { 260 if (strncmp("nopav", parsestring, length) == 0) {
261 dasd_nopav = 1; 261 if (MACHINE_IS_VM)
262 MESSAGE(KERN_INFO, "%s", "disable PAV mode"); 262 MESSAGE(KERN_INFO, "%s", "'nopav' not supported on VM");
263 else {
264 dasd_nopav = 1;
265 MESSAGE(KERN_INFO, "%s", "disable PAV mode");
266 }
263 return residual_str; 267 return residual_str;
264 } 268 }
265 if (strncmp("fixedbuffers", parsestring, length) == 0) { 269 if (strncmp("fixedbuffers", parsestring, length) == 0) {
@@ -523,17 +527,17 @@ dasd_create_device(struct ccw_device *cdev)
523{ 527{
524 struct dasd_devmap *devmap; 528 struct dasd_devmap *devmap;
525 struct dasd_device *device; 529 struct dasd_device *device;
530 unsigned long flags;
526 int rc; 531 int rc;
527 532
528 devmap = dasd_devmap_from_cdev(cdev); 533 devmap = dasd_devmap_from_cdev(cdev);
529 if (IS_ERR(devmap)) 534 if (IS_ERR(devmap))
530 return (void *) devmap; 535 return (void *) devmap;
531 cdev->dev.driver_data = devmap;
532 536
533 device = dasd_alloc_device(); 537 device = dasd_alloc_device();
534 if (IS_ERR(device)) 538 if (IS_ERR(device))
535 return device; 539 return device;
536 atomic_set(&device->ref_count, 2); 540 atomic_set(&device->ref_count, 3);
537 541
538 spin_lock(&dasd_devmap_lock); 542 spin_lock(&dasd_devmap_lock);
539 if (!devmap->device) { 543 if (!devmap->device) {
@@ -552,6 +556,11 @@ dasd_create_device(struct ccw_device *cdev)
552 dasd_free_device(device); 556 dasd_free_device(device);
553 return ERR_PTR(rc); 557 return ERR_PTR(rc);
554 } 558 }
559
560 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
561 cdev->dev.driver_data = device;
562 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
563
555 return device; 564 return device;
556} 565}
557 566
@@ -569,6 +578,7 @@ dasd_delete_device(struct dasd_device *device)
569{ 578{
570 struct ccw_device *cdev; 579 struct ccw_device *cdev;
571 struct dasd_devmap *devmap; 580 struct dasd_devmap *devmap;
581 unsigned long flags;
572 582
573 /* First remove device pointer from devmap. */ 583 /* First remove device pointer from devmap. */
574 devmap = dasd_find_busid(device->cdev->dev.bus_id); 584 devmap = dasd_find_busid(device->cdev->dev.bus_id);
@@ -582,9 +592,16 @@ dasd_delete_device(struct dasd_device *device)
582 devmap->device = NULL; 592 devmap->device = NULL;
583 spin_unlock(&dasd_devmap_lock); 593 spin_unlock(&dasd_devmap_lock);
584 594
585 /* Drop ref_count by 2, one for the devmap reference and 595 /* Disconnect dasd_device structure from ccw_device structure. */
586 * one for the passed reference. */ 596 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
587 atomic_sub(2, &device->ref_count); 597 device->cdev->dev.driver_data = NULL;
598 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
599
600 /*
601 * Drop ref_count by 3, one for the devmap reference, one for
602 * the cdev reference and one for the passed reference.
603 */
604 atomic_sub(3, &device->ref_count);
588 605
589 /* Wait for reference counter to drop to zero. */ 606 /* Wait for reference counter to drop to zero. */
590 wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0); 607 wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
@@ -593,9 +610,6 @@ dasd_delete_device(struct dasd_device *device)
593 cdev = device->cdev; 610 cdev = device->cdev;
594 device->cdev = NULL; 611 device->cdev = NULL;
595 612
596 /* Disconnect dasd_devmap structure from ccw_device structure. */
597 cdev->dev.driver_data = NULL;
598
599 /* Put ccw_device structure. */ 613 /* Put ccw_device structure. */
600 put_device(&cdev->dev); 614 put_device(&cdev->dev);
601 615
@@ -615,21 +629,32 @@ dasd_put_device_wake(struct dasd_device *device)
615 629
616/* 630/*
617 * Return dasd_device structure associated with cdev. 631 * Return dasd_device structure associated with cdev.
632 * This function needs to be called with the ccw device
633 * lock held. It can be used from interrupt context.
634 */
635struct dasd_device *
636dasd_device_from_cdev_locked(struct ccw_device *cdev)
637{
638 struct dasd_device *device = cdev->dev.driver_data;
639
640 if (!device)
641 return ERR_PTR(-ENODEV);
642 dasd_get_device(device);
643 return device;
644}
645
646/*
647 * Return dasd_device structure associated with cdev.
618 */ 648 */
619struct dasd_device * 649struct dasd_device *
620dasd_device_from_cdev(struct ccw_device *cdev) 650dasd_device_from_cdev(struct ccw_device *cdev)
621{ 651{
622 struct dasd_devmap *devmap;
623 struct dasd_device *device; 652 struct dasd_device *device;
653 unsigned long flags;
624 654
625 device = ERR_PTR(-ENODEV); 655 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
626 spin_lock(&dasd_devmap_lock); 656 device = dasd_device_from_cdev_locked(cdev);
627 devmap = cdev->dev.driver_data; 657 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
628 if (devmap && devmap->device) {
629 device = devmap->device;
630 dasd_get_device(device);
631 }
632 spin_unlock(&dasd_devmap_lock);
633 return device; 658 return device;
634} 659}
635 660
@@ -730,16 +755,17 @@ static ssize_t
730dasd_discipline_show(struct device *dev, struct device_attribute *attr, 755dasd_discipline_show(struct device *dev, struct device_attribute *attr,
731 char *buf) 756 char *buf)
732{ 757{
733 struct dasd_devmap *devmap; 758 struct dasd_device *device;
734 char *dname; 759 ssize_t len;
735 760
736 spin_lock(&dasd_devmap_lock); 761 device = dasd_device_from_cdev(to_ccwdev(dev));
737 dname = "none"; 762 if (!IS_ERR(device) && device->discipline) {
738 devmap = dev->driver_data; 763 len = snprintf(buf, PAGE_SIZE, "%s\n",
739 if (devmap && devmap->device && devmap->device->discipline) 764 device->discipline->name);
740 dname = devmap->device->discipline->name; 765 dasd_put_device(device);
741 spin_unlock(&dasd_devmap_lock); 766 } else
742 return snprintf(buf, PAGE_SIZE, "%s\n", dname); 767 len = snprintf(buf, PAGE_SIZE, "none\n");
768 return len;
743} 769}
744 770
745static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); 771static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
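
With the driver_data rework above, the cdev-to-device lookup comes in two flavours: dasd_device_from_cdev() takes the ccw device lock itself, while dasd_device_from_cdev_locked() is for callers, such as the interrupt handler, that already hold it. Both return a counted reference that must be dropped with dasd_put_device(). A hedged in-driver sketch of the two call patterns; only the lookup/put helpers are from this patch, everything else is illustrative.

/* Hedged sketch; would live next to dasd.c and use its private header. */
#include <linux/err.h>
#include "dasd_int.h"

/* Process context: the helper takes get_ccwdev_lock(cdev) internally. */
static void sketch_show_state(struct ccw_device *cdev)
{
        struct dasd_device *device;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return;
        /* ... inspect device->state, device->discipline, ... */
        dasd_put_device(device);        /* drop the reference taken above */
}

/* Interrupt path: the ccw device lock is already held, so the _locked
 * variant must be used to avoid taking it twice. */
static void sketch_irq_path(struct ccw_device *cdev)
{
        struct dasd_device *device;

        device = dasd_device_from_cdev_locked(cdev);
        if (IS_ERR(device))
                return;
        /* ... handle the interrupt for this device ... */
        dasd_put_device(device);
}
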
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index da65f1b032f5..e0bf30ebb215 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -678,7 +678,7 @@ int __init dasd_eer_init(void)
678 return 0; 678 return 0;
679} 679}
680 680
681void __exit dasd_eer_exit(void) 681void dasd_eer_exit(void)
682{ 682{
683 WARN_ON(misc_deregister(&dasd_eer_dev) != 0); 683 WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
684} 684}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 3ccf06d28ba1..9f52004f6fc2 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -534,6 +534,7 @@ int dasd_add_sysfs_files(struct ccw_device *);
534void dasd_remove_sysfs_files(struct ccw_device *); 534void dasd_remove_sysfs_files(struct ccw_device *);
535 535
536struct dasd_device *dasd_device_from_cdev(struct ccw_device *); 536struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
537struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
537struct dasd_device *dasd_device_from_devindex(int); 538struct dasd_device *dasd_device_from_devindex(int);
538 539
539int dasd_parse(void); 540int dasd_parse(void);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index ca7d51f7eccc..cab2c736683a 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -453,7 +453,7 @@ static int __init xpram_init(void)
453 PRINT_WARN("No expanded memory available\n"); 453 PRINT_WARN("No expanded memory available\n");
454 return -ENODEV; 454 return -ENODEV;
455 } 455 }
456 xpram_pages = xpram_highest_page_index(); 456 xpram_pages = xpram_highest_page_index() + 1;
457 PRINT_INFO(" %u pages expanded memory found (%lu KB).\n", 457 PRINT_INFO(" %u pages expanded memory found (%lu KB).\n",
458 xpram_pages, (unsigned long) xpram_pages*4); 458 xpram_pages, (unsigned long) xpram_pages*4);
459 rc = xpram_setup_sizes(xpram_pages); 459 rc = xpram_setup_sizes(xpram_pages);
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 0c0162ff6c0c..c3e97b4fc186 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
28obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o 28obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o 29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
30obj-$(CONFIG_MONREADER) += monreader.o 30obj-$(CONFIG_MONREADER) += monreader.o
31obj-$(CONFIG_MONWRITER) += monwriter.o
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
new file mode 100644
index 000000000000..1e3939aeb8ab
--- /dev/null
+++ b/drivers/s390/char/monwriter.c
@@ -0,0 +1,292 @@
1/*
2 * drivers/s390/char/monwriter.c
3 *
4 * Character device driver for writing z/VM *MONITOR service records.
5 *
6 * Copyright (C) IBM Corp. 2006
7 *
8 * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/errno.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/miscdevice.h>
18#include <linux/ctype.h>
19#include <linux/poll.h>
20#include <asm/uaccess.h>
21#include <asm/ebcdic.h>
22#include <asm/io.h>
23#include <asm/appldata.h>
24#include <asm/monwriter.h>
25
26#define MONWRITE_MAX_DATALEN 4024
27
28static int mon_max_bufs = 255;
29
30struct mon_buf {
31 struct list_head list;
32 struct monwrite_hdr hdr;
33 int diag_done;
34 char *data;
35};
36
37struct mon_private {
38 struct list_head list;
39 struct monwrite_hdr hdr;
40 size_t hdr_to_read;
41 size_t data_to_read;
42 struct mon_buf *current_buf;
43 int mon_buf_count;
44};
45
46/*
47 * helper functions
48 */
49
50static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
51{
52 struct appldata_product_id id;
53 int rc;
54
55 strcpy(id.prod_nr, "LNXAPPL");
56 id.prod_fn = myhdr->applid;
57 id.record_nr = myhdr->record_num;
58 id.version_nr = myhdr->version;
59 id.release_nr = myhdr->release;
60 id.mod_lvl = myhdr->mod_level;
61 rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
62 if (rc <= 0)
63 return rc;
64 if (rc == 5)
65 return -EPERM;
66 printk("DIAG X'DC' error with return code: %i\n", rc);
67 return -EINVAL;
68}
69
70static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
71 struct monwrite_hdr *monhdr)
72{
73 struct mon_buf *entry, *next;
74
75 list_for_each_entry_safe(entry, next, &monpriv->list, list)
76 if (entry->hdr.applid == monhdr->applid &&
77 entry->hdr.record_num == monhdr->record_num &&
78 entry->hdr.version == monhdr->version &&
79 entry->hdr.release == monhdr->release &&
80 entry->hdr.mod_level == monhdr->mod_level)
81 return entry;
82 return NULL;
83}
84
85static int monwrite_new_hdr(struct mon_private *monpriv)
86{
87 struct monwrite_hdr *monhdr = &monpriv->hdr;
88 struct mon_buf *monbuf;
89 int rc;
90
91 if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
92 monhdr->mon_function > MONWRITE_START_CONFIG ||
93 monhdr->hdrlen != sizeof(struct monwrite_hdr))
94 return -EINVAL;
95 monbuf = monwrite_find_hdr(monpriv, monhdr);
96 if (monbuf) {
97 if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
98 monhdr->datalen = monbuf->hdr.datalen;
99 rc = monwrite_diag(monhdr, monbuf->data,
100 APPLDATA_STOP_REC);
101 list_del(&monbuf->list);
102 monpriv->mon_buf_count--;
103 kfree(monbuf->data);
104 kfree(monbuf);
105 monbuf = NULL;
106 }
107 } else {
108 if (monpriv->mon_buf_count >= mon_max_bufs)
109 return -ENOSPC;
110 monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
111 if (!monbuf)
112 return -ENOMEM;
113 monbuf->data = kzalloc(monhdr->datalen,
114 GFP_KERNEL | GFP_DMA);
115 if (!monbuf->data) {
116 kfree(monbuf);
117 return -ENOMEM;
118 }
119 monbuf->hdr = *monhdr;
120 list_add_tail(&monbuf->list, &monpriv->list);
121 monpriv->mon_buf_count++;
122 }
123 monpriv->current_buf = monbuf;
124 return 0;
125}
126
127static int monwrite_new_data(struct mon_private *monpriv)
128{
129 struct monwrite_hdr *monhdr = &monpriv->hdr;
130 struct mon_buf *monbuf = monpriv->current_buf;
131 int rc = 0;
132
133 switch (monhdr->mon_function) {
134 case MONWRITE_START_INTERVAL:
135 if (!monbuf->diag_done) {
136 rc = monwrite_diag(monhdr, monbuf->data,
137 APPLDATA_START_INTERVAL_REC);
138 monbuf->diag_done = 1;
139 }
140 break;
141 case MONWRITE_START_CONFIG:
142 if (!monbuf->diag_done) {
143 rc = monwrite_diag(monhdr, monbuf->data,
144 APPLDATA_START_CONFIG_REC);
145 monbuf->diag_done = 1;
146 }
147 break;
148 case MONWRITE_GEN_EVENT:
149 rc = monwrite_diag(monhdr, monbuf->data,
150 APPLDATA_GEN_EVENT_REC);
151 list_del(&monpriv->current_buf->list);
152 kfree(monpriv->current_buf->data);
153 kfree(monpriv->current_buf);
154 monpriv->current_buf = NULL;
155 break;
156 default:
157 /* monhdr->mon_function is checked in monwrite_new_hdr */
158 BUG();
159 }
160 return rc;
161}
162
163/*
164 * file operations
165 */
166
167static int monwrite_open(struct inode *inode, struct file *filp)
168{
169 struct mon_private *monpriv;
170
171 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
172 if (!monpriv)
173 return -ENOMEM;
174 INIT_LIST_HEAD(&monpriv->list);
175 monpriv->hdr_to_read = sizeof(monpriv->hdr);
176 filp->private_data = monpriv;
177 return nonseekable_open(inode, filp);
178}
179
180static int monwrite_close(struct inode *inode, struct file *filp)
181{
182 struct mon_private *monpriv = filp->private_data;
183 struct mon_buf *entry, *next;
184
185 list_for_each_entry_safe(entry, next, &monpriv->list, list) {
186 if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
187 monwrite_diag(&entry->hdr, entry->data,
188 APPLDATA_STOP_REC);
189 monpriv->mon_buf_count--;
190 list_del(&entry->list);
191 kfree(entry->data);
192 kfree(entry);
193 }
194 kfree(monpriv);
195 return 0;
196}
197
198static ssize_t monwrite_write(struct file *filp, const char __user *data,
199 size_t count, loff_t *ppos)
200{
201 struct mon_private *monpriv = filp->private_data;
202 size_t len, written;
203 void *to;
204 int rc;
205
206 for (written = 0; written < count; ) {
207 if (monpriv->hdr_to_read) {
208 len = min(count - written, monpriv->hdr_to_read);
209 to = (char *) &monpriv->hdr +
210 sizeof(monpriv->hdr) - monpriv->hdr_to_read;
211 if (copy_from_user(to, data + written, len)) {
212 rc = -EFAULT;
213 goto out_error;
214 }
215 monpriv->hdr_to_read -= len;
216 written += len;
217 if (monpriv->hdr_to_read > 0)
218 continue;
219 rc = monwrite_new_hdr(monpriv);
220 if (rc)
221 goto out_error;
222 monpriv->data_to_read = monpriv->current_buf ?
223 monpriv->current_buf->hdr.datalen : 0;
224 }
225
226 if (monpriv->data_to_read) {
227 len = min(count - written, monpriv->data_to_read);
228 to = monpriv->current_buf->data +
229 monpriv->hdr.datalen - monpriv->data_to_read;
230 if (copy_from_user(to, data + written, len)) {
231 rc = -EFAULT;
232 goto out_error;
233 }
234 monpriv->data_to_read -= len;
235 written += len;
236 if (monpriv->data_to_read > 0)
237 continue;
238 rc = monwrite_new_data(monpriv);
239 if (rc)
240 goto out_error;
241 }
242 monpriv->hdr_to_read = sizeof(monpriv->hdr);
243 }
244 return written;
245
246out_error:
247 monpriv->data_to_read = 0;
248 monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
249 return rc;
250}
251
252static struct file_operations monwrite_fops = {
253 .owner = THIS_MODULE,
254 .open = &monwrite_open,
255 .release = &monwrite_close,
256 .write = &monwrite_write,
257};
258
259static struct miscdevice mon_dev = {
260 .name = "monwriter",
261 .fops = &monwrite_fops,
262 .minor = MISC_DYNAMIC_MINOR,
263};
264
265/*
266 * module init/exit
267 */
268
269static int __init mon_init(void)
270{
271 if (MACHINE_IS_VM)
272 return misc_register(&mon_dev);
273 else
274 return -ENODEV;
275}
276
277static void __exit mon_exit(void)
278{
279 WARN_ON(misc_deregister(&mon_dev) != 0);
280}
281
282module_init(mon_init);
283module_exit(mon_exit);
284
285module_param_named(max_bufs, mon_max_bufs, int, 0644);
286MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
287 "that can be active at one time");
288
289MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
290MODULE_DESCRIPTION("Character device driver for writing z/VM "
291 "APPLDATA monitor records.");
292MODULE_LICENSE("GPL");
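
monwrite_write() above parses its input as a sequence of records, each a struct monwrite_hdr immediately followed by hdr.datalen bytes of payload, and only acts once a complete record has arrived. A hedged user-space sketch of that protocol; the /dev/monwriter path, the application id and the payload are assumptions for illustration, while the header fields come from asm/monwriter.h.

/* Hedged sketch of writing one GEN_EVENT record to the monwriter device. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <asm/monwriter.h>

int main(void)
{
        struct monwrite_hdr hdr;
        char data[16] = "sample payload";
        int fd;

        fd = open("/dev/monwriter", O_WRONLY);  /* assumed device node */
        if (fd < 0)
                return 1;

        memset(&hdr, 0, sizeof(hdr));
        hdr.mon_function = MONWRITE_GEN_EVENT;  /* one-shot event record */
        hdr.applid = 0x10;                      /* illustrative application id */
        hdr.hdrlen = sizeof(hdr);
        hdr.datalen = sizeof(data);

        /* Header first, payload second; monwrite_write() accepts both in one
         * write() or split across several. */
        if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr) ||
            write(fd, data, sizeof(data)) != sizeof(data)) {
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}
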
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 19762f3476aa..1678b6c757ec 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2004,2005 IBM Corporation 2 * Copyright (C) 2004,2005 IBM Corporation
3 * Interface implementation for communication with the v/VM control program 3 * Interface implementation for communication with the z/VM control program
4 * Author(s): Christian Borntraeger <cborntra@de.ibm.com> 4 * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
5 * 5 *
6 * 6 *
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 87389e730465..8a5975f3dad7 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2004, 2005 IBM Corporation 2 * Copyright (C) 2004, 2005 IBM Corporation
3 * Interface implementation for communication with the v/VM control program 3 * Interface implementation for communication with the z/VM control program
4 * Version 1.0 4 * Version 1.0
5 * Author(s): Christian Borntraeger <cborntra@de.ibm.com> 5 * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
6 * 6 *
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c28444af0919..3bb4e472d73d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -256,7 +256,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
256 /* trigger path verification. */ 256 /* trigger path verification. */
257 if (sch->driver && sch->driver->verify) 257 if (sch->driver && sch->driver->verify)
258 sch->driver->verify(&sch->dev); 258 sch->driver->verify(&sch->dev);
259 else if (sch->vpm == mask) 259 else if (sch->lpm == mask)
260 goto out_unreg; 260 goto out_unreg;
261out_unlock: 261out_unlock:
262 spin_unlock_irq(&sch->lock); 262 spin_unlock_irq(&sch->lock);
@@ -378,6 +378,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
378 378
379 if (chp_mask == 0) { 379 if (chp_mask == 0) {
380 spin_unlock_irq(&sch->lock); 380 spin_unlock_irq(&sch->lock);
381 put_device(&sch->dev);
381 return 0; 382 return 0;
382 } 383 }
383 old_lpm = sch->lpm; 384 old_lpm = sch->lpm;
@@ -392,7 +393,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
392 393
393 spin_unlock_irq(&sch->lock); 394 spin_unlock_irq(&sch->lock);
394 put_device(&sch->dev); 395 put_device(&sch->dev);
395 return (res_data->fla_mask == 0xffff) ? -ENODEV : 0; 396 return 0;
396} 397}
397 398
398 399
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 89320c1ad825..2e2882daefbb 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -16,11 +16,10 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19
20#include <asm/cio.h> 19#include <asm/cio.h>
21#include <asm/delay.h> 20#include <asm/delay.h>
22#include <asm/irq.h> 21#include <asm/irq.h>
23 22#include <asm/setup.h>
24#include "airq.h" 23#include "airq.h"
25#include "cio.h" 24#include "cio.h"
26#include "css.h" 25#include "css.h"
@@ -192,7 +191,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
192 sch->orb.pfch = sch->options.prefetch == 0; 191 sch->orb.pfch = sch->options.prefetch == 0;
193 sch->orb.spnd = sch->options.suspend; 192 sch->orb.spnd = sch->options.suspend;
194 sch->orb.ssic = sch->options.suspend && sch->options.inter; 193 sch->orb.ssic = sch->options.suspend && sch->options.inter;
195 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; 194 sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm;
196#ifdef CONFIG_64BIT 195#ifdef CONFIG_64BIT
197 /* 196 /*
198 * for 64 bit we always support 64 bit IDAWs with 4k page size only 197 * for 64 bit we always support 64 bit IDAWs with 4k page size only
@@ -570,10 +569,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
570 sch->opm = 0xff; 569 sch->opm = 0xff;
571 if (!cio_is_console(sch->schid)) 570 if (!cio_is_console(sch->schid))
572 chsc_validate_chpids(sch); 571 chsc_validate_chpids(sch);
573 sch->lpm = sch->schib.pmcw.pim & 572 sch->lpm = sch->schib.pmcw.pam & sch->opm;
574 sch->schib.pmcw.pam &
575 sch->schib.pmcw.pom &
576 sch->opm;
577 573
578 CIO_DEBUG(KERN_INFO, 0, 574 CIO_DEBUG(KERN_INFO, 0,
579 "Detected device %04x on subchannel 0.%x.%04X" 575 "Detected device %04x on subchannel 0.%x.%04X"
@@ -841,14 +837,26 @@ __clear_subchannel_easy(struct subchannel_id schid)
841 return -EBUSY; 837 return -EBUSY;
842} 838}
843 839
844extern void do_reipl(unsigned long devno); 840struct sch_match_id {
845static int 841 struct subchannel_id schid;
846__shutdown_subchannel_easy(struct subchannel_id schid, void *data) 842 struct ccw_dev_id devid;
843 int rc;
844};
845
846static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
847 void *data)
847{ 848{
848 struct schib schib; 849 struct schib schib;
850 struct sch_match_id *match_id = data;
849 851
850 if (stsch_err(schid, &schib)) 852 if (stsch_err(schid, &schib))
851 return -ENXIO; 853 return -ENXIO;
854 if (match_id && schib.pmcw.dnv &&
855 (schib.pmcw.dev == match_id->devid.devno) &&
856 (schid.ssid == match_id->devid.ssid)) {
857 match_id->schid = schid;
858 match_id->rc = 0;
859 }
852 if (!schib.pmcw.ena) 860 if (!schib.pmcw.ena)
853 return 0; 861 return 0;
854 switch(__disable_subchannel_easy(schid, &schib)) { 862 switch(__disable_subchannel_easy(schid, &schib)) {
@@ -864,18 +872,71 @@ __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
864 return 0; 872 return 0;
865} 873}
866 874
867void 875static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
868clear_all_subchannels(void) 876 struct subchannel_id *schid)
869{ 877{
878 struct sch_match_id match_id;
879
880 match_id.devid = *devid;
881 match_id.rc = -ENODEV;
870 local_irq_disable(); 882 local_irq_disable();
871 for_each_subchannel(__shutdown_subchannel_easy, NULL); 883 for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
884 if (match_id.rc == 0)
885 *schid = match_id.schid;
886 return match_id.rc;
872} 887}
873 888
889
890void clear_all_subchannels(void)
891{
892 local_irq_disable();
893 for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
894}
895
896extern void do_reipl_asm(__u32 schid);
897
874/* Make sure all subchannels are quiet before we re-ipl an lpar. */ 898/* Make sure all subchannels are quiet before we re-ipl an lpar. */
875void 899void reipl_ccw_dev(struct ccw_dev_id *devid)
876reipl(unsigned long devno)
877{ 900{
878 clear_all_subchannels(); 901 struct subchannel_id schid;
902
903 if (clear_all_subchannels_and_match(devid, &schid))
904 panic("IPL Device not found\n");
879 cio_reset_channel_paths(); 905 cio_reset_channel_paths();
880 do_reipl(devno); 906 do_reipl_asm(*((__u32*)&schid));
907}
908
909extern struct schib ipl_schib;
910
911/*
912 * ipl_save_parameters gets called very early. It is not allowed to access
913 * anything in the bss section at all. The bss section is not cleared yet,
914 * but may contain some ipl parameters written by the firmware.
915 * These parameters (if present) are copied to 0x2000.
916 * To avoid corruption of the ipl parameters, all variables used by this
917 * function must reside on the stack or in the data section.
918 */
919void ipl_save_parameters(void)
920{
921 struct subchannel_id schid;
922 unsigned int *ipl_ptr;
923 void *src, *dst;
924
925 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
926 if (!schid.one)
927 return;
928 if (stsch(schid, &ipl_schib))
929 return;
930 if (!ipl_schib.pmcw.dnv)
931 return;
932 ipl_devno = ipl_schib.pmcw.dev;
933 ipl_flags |= IPL_DEVNO_VALID;
934 if (!ipl_schib.pmcw.qf)
935 return;
936 ipl_flags |= IPL_PARMBLOCK_VALID;
937 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
938 src = (void *)(unsigned long)*ipl_ptr;
939 dst = (void *)IPL_PARMBLOCK_ORIGIN;
940 memmove(dst, src, PAGE_SIZE);
941 *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
881} 942}
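
reipl_ccw_dev() above replaces the old devno-based reipl(): the caller now hands over a struct ccw_dev_id and the matching subchannel is looked up on the fly. A hedged sketch of a caller; the wrapper name and the assumption that struct ccw_dev_id is visible via asm/cio.h are illustrative, only reipl_ccw_dev() itself comes from this patch.

/* Hedged sketch of driving the new reipl entry point. */
#include <asm/cio.h>            /* assumed home of struct ccw_dev_id */

extern void reipl_ccw_dev(struct ccw_dev_id *devid);

/* Illustrative caller, e.g. from the reipl shutdown path. */
static void sketch_reipl_from_devno(unsigned int devno)
{
        struct ccw_dev_id devid = {
                .ssid  = 0,             /* default subchannel set */
                .devno = devno,         /* e.g. the saved ipl_devno */
        };

        /* Quiesces every subchannel, resets the channel paths and finally
         * branches to do_reipl_asm() for the matching subchannel. */
        reipl_ccw_dev(&devid);
}
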
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 13eeea3d547f..7086a74e9871 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -182,136 +182,141 @@ get_subchannel_by_schid(struct subchannel_id schid)
182 return dev ? to_subchannel(dev) : NULL; 182 return dev ? to_subchannel(dev) : NULL;
183} 183}
184 184
185 185static inline int css_get_subchannel_status(struct subchannel *sch)
186static inline int
187css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
188{ 186{
189 struct schib schib; 187 struct schib schib;
190 int cc;
191 188
192 cc = stsch(schid, &schib); 189 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
193 if (cc)
194 return CIO_GONE;
195 if (!schib.pmcw.dnv)
196 return CIO_GONE; 190 return CIO_GONE;
197 if (sch && sch->schib.pmcw.dnv && 191 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
198 (schib.pmcw.dev != sch->schib.pmcw.dev))
199 return CIO_REVALIDATE; 192 return CIO_REVALIDATE;
200 if (sch && !sch->lpm) 193 if (!sch->lpm)
201 return CIO_NO_PATH; 194 return CIO_NO_PATH;
202 return CIO_OPER; 195 return CIO_OPER;
203} 196}
204 197
205static int 198static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
206css_evaluate_subchannel(struct subchannel_id schid, int slow)
207{ 199{
208 int event, ret, disc; 200 int event, ret, disc;
209 struct subchannel *sch;
210 unsigned long flags; 201 unsigned long flags;
202 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
211 203
212 sch = get_subchannel_by_schid(schid); 204 spin_lock_irqsave(&sch->lock, flags);
213 disc = sch ? device_is_disconnected(sch) : 0; 205 disc = device_is_disconnected(sch);
214 if (disc && slow) { 206 if (disc && slow) {
215 if (sch) 207 /* Disconnected devices are evaluated directly only.*/
216 put_device(&sch->dev); 208 spin_unlock_irqrestore(&sch->lock, flags);
217 return 0; /* Already processed. */ 209 return 0;
218 } 210 }
219 /* 211 /* No interrupt after machine check - kill pending timers. */
220 * We've got a machine check, so running I/O won't get an interrupt. 212 device_kill_pending_timer(sch);
221 * Kill any pending timers.
222 */
223 if (sch)
224 device_kill_pending_timer(sch);
225 if (!disc && !slow) { 213 if (!disc && !slow) {
226 if (sch) 214 /* Non-disconnected devices are evaluated on the slow path. */
227 put_device(&sch->dev); 215 spin_unlock_irqrestore(&sch->lock, flags);
228 return -EAGAIN; /* Will be done on the slow path. */ 216 return -EAGAIN;
229 } 217 }
230 event = css_get_subchannel_status(sch, schid); 218 event = css_get_subchannel_status(sch);
231 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", 219 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
232 schid.ssid, schid.sch_no, event, 220 sch->schid.ssid, sch->schid.sch_no, event,
233 sch?(disc?"disconnected":"normal"):"unknown", 221 disc ? "disconnected" : "normal",
234 slow?"slow":"fast"); 222 slow ? "slow" : "fast");
223 /* Analyze subchannel status. */
224 action = NONE;
235 switch (event) { 225 switch (event) {
236 case CIO_NO_PATH: 226 case CIO_NO_PATH:
237 case CIO_GONE: 227 if (disc) {
238 if (!sch) { 228 /* Check if paths have become available. */
239 /* Never used this subchannel. Ignore. */ 229 action = REPROBE;
240 ret = 0;
241 break; 230 break;
242 } 231 }
243 if (disc && (event == CIO_NO_PATH)) { 232 /* fall through */
244 /* 233 case CIO_GONE:
245 * Uargh, hack again. Because we don't get a machine 234 /* Prevent unwanted effects when opening lock. */
246 * check on configure on, our path bookkeeping can 235 cio_disable_subchannel(sch);
247 * be out of date here (it's fine while we only do 236 device_set_disconnected(sch);
248 * logical varying or get chsc machine checks). We 237 /* Ask driver what to do with device. */
249 * need to force reprobing or we might miss devices 238 action = UNREGISTER;
250 * coming operational again. It won't do harm in real 239 if (sch->driver && sch->driver->notify) {
251 * no path situations.
252 */
253 spin_lock_irqsave(&sch->lock, flags);
254 device_trigger_reprobe(sch);
255 spin_unlock_irqrestore(&sch->lock, flags); 240 spin_unlock_irqrestore(&sch->lock, flags);
256 ret = 0; 241 ret = sch->driver->notify(&sch->dev, event);
257 break; 242 spin_lock_irqsave(&sch->lock, flags);
258 } 243 if (ret)
259 if (sch->driver && sch->driver->notify && 244 action = NONE;
260 sch->driver->notify(&sch->dev, event)) {
261 cio_disable_subchannel(sch);
262 device_set_disconnected(sch);
263 ret = 0;
264 break;
265 } 245 }
266 /*
267 * Unregister subchannel.
268 * The device will be killed automatically.
269 */
270 cio_disable_subchannel(sch);
271 css_sch_device_unregister(sch);
272 /* Reset intparm to zeroes. */
273 sch->schib.pmcw.intparm = 0;
274 cio_modify(sch);
275 put_device(&sch->dev);
276 ret = 0;
277 break; 246 break;
278 case CIO_REVALIDATE: 247 case CIO_REVALIDATE:
279 /* 248 /* Device will be removed, so no notify necessary. */
280 * Revalidation machine check. Sick. 249 if (disc)
281 * We don't notify the driver since we have to throw the device 250 /* Reprobe because immediate unregister might block. */
282 * away in any case. 251 action = REPROBE;
283 */ 252 else
284 if (!disc) { 253 action = UNREGISTER_PROBE;
285 css_sch_device_unregister(sch);
286 /* Reset intparm to zeroes. */
287 sch->schib.pmcw.intparm = 0;
288 cio_modify(sch);
289 put_device(&sch->dev);
290 ret = css_probe_device(schid);
291 } else {
292 /*
293 * We can't immediately deregister the disconnected
294 * device since it might block.
295 */
296 spin_lock_irqsave(&sch->lock, flags);
297 device_trigger_reprobe(sch);
298 spin_unlock_irqrestore(&sch->lock, flags);
299 ret = 0;
300 }
301 break; 254 break;
302 case CIO_OPER: 255 case CIO_OPER:
303 if (disc) { 256 if (disc)
304 spin_lock_irqsave(&sch->lock, flags);
305 /* Get device operational again. */ 257 /* Get device operational again. */
306 device_trigger_reprobe(sch); 258 action = REPROBE;
307 spin_unlock_irqrestore(&sch->lock, flags); 259 break;
308 } 260 }
309 ret = sch ? 0 : css_probe_device(schid); 261 /* Perform action. */
262 ret = 0;
263 switch (action) {
264 case UNREGISTER:
265 case UNREGISTER_PROBE:
266 /* Unregister device (will use subchannel lock). */
267 spin_unlock_irqrestore(&sch->lock, flags);
268 css_sch_device_unregister(sch);
269 spin_lock_irqsave(&sch->lock, flags);
270
271 /* Reset intparm to zeroes. */
272 sch->schib.pmcw.intparm = 0;
273 cio_modify(sch);
274
275 /* Probe if necessary. */
276 if (action == UNREGISTER_PROBE)
277 ret = css_probe_device(sch->schid);
278 break;
279 case REPROBE:
280 device_trigger_reprobe(sch);
310 break; 281 break;
311 default: 282 default:
312 BUG(); 283 break;
313 ret = 0; 284 }
285 spin_unlock_irqrestore(&sch->lock, flags);
286
287 return ret;
288}
289
290static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
291{
292 struct schib schib;
293
294 if (!slow) {
295 /* Will be done on the slow path. */
296 return -EAGAIN;
314 } 297 }
298 if (stsch(schid, &schib) || !schib.pmcw.dnv) {
299 /* Unusable - ignore. */
300 return 0;
301 }
302 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
303 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
304
305 return css_probe_device(schid);
306}
307
308static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
309{
310 struct subchannel *sch;
311 int ret;
312
313 sch = get_subchannel_by_schid(schid);
314 if (sch) {
315 ret = css_evaluate_known_subchannel(sch, slow);
316 put_device(&sch->dev);
317 } else
318 ret = css_evaluate_new_subchannel(schid, slow);
319
315 return ret; 320 return ret;
316} 321}
317 322
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 646da5640401..688945662c15 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -52,53 +52,81 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
52 return 1; 52 return 1;
53} 53}
54 54
55/* 55/* Store modalias string delimited by prefix/suffix string into buffer with
56 * Hotplugging interface for ccw devices. 56 * specified size. Return length of resulting string (excluding trailing '\0')
57 * Heavily modeled on pci and usb hotplug. 57 * even if string doesn't fit buffer (snprintf semantics). */
58 */ 58static int snprint_alias(char *buf, size_t size, const char *prefix,
59static int 59 struct ccw_device_id *id, const char *suffix)
60ccw_uevent (struct device *dev, char **envp, int num_envp,
61 char *buffer, int buffer_size)
62{ 60{
63 struct ccw_device *cdev = to_ccwdev(dev); 61 int len;
64 int i = 0;
65 int length = 0;
66 62
67 if (!cdev) 63 len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type,
68 return -ENODEV; 64 id->cu_model);
65 if (len > size)
66 return len;
67 buf += len;
68 size -= len;
69 69
70 /* what we want to pass to /sbin/hotplug */ 70 if (id->dev_type != 0)
71 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
72 id->dev_model, suffix);
73 else
74 len += snprintf(buf, size, "dtdm%s", suffix);
71 75
72 envp[i++] = buffer; 76 return len;
73 length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X", 77}
74 cdev->id.cu_type);
75 if ((buffer_size - length <= 0) || (i >= num_envp))
76 return -ENOMEM;
77 ++length;
78 buffer += length;
79 78
79/* Set up environment variables for ccw device uevent. Return 0 on success,
80 * non-zero otherwise. */
81static int ccw_uevent(struct device *dev, char **envp, int num_envp,
82 char *buffer, int buffer_size)
83{
84 struct ccw_device *cdev = to_ccwdev(dev);
85 struct ccw_device_id *id = &(cdev->id);
86 int i = 0;
87 int len;
88
89 /* CU_TYPE= */
90 len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1;
91 if (len > buffer_size || i >= num_envp)
92 return -ENOMEM;
80 envp[i++] = buffer; 93 envp[i++] = buffer;
81 length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X", 94 buffer += len;
82 cdev->id.cu_model); 95 buffer_size -= len;
83 if ((buffer_size - length <= 0) || (i >= num_envp)) 96
97 /* CU_MODEL= */
98 len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1;
99 if (len > buffer_size || i >= num_envp)
84 return -ENOMEM; 100 return -ENOMEM;
85 ++length; 101 envp[i++] = buffer;
86 buffer += length; 102 buffer += len;
103 buffer_size -= len;
87 104
88 /* The next two can be zero, that's ok for us */ 105 /* The next two can be zero, that's ok for us */
89 envp[i++] = buffer; 106 /* DEV_TYPE= */
90 length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X", 107 len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1;
91 cdev->id.dev_type); 108 if (len > buffer_size || i >= num_envp)
92 if ((buffer_size - length <= 0) || (i >= num_envp))
93 return -ENOMEM; 109 return -ENOMEM;
94 ++length; 110 envp[i++] = buffer;
95 buffer += length; 111 buffer += len;
112 buffer_size -= len;
96 113
114 /* DEV_MODEL= */
115 len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X",
116 (unsigned char) id->dev_model) + 1;
117 if (len > buffer_size || i >= num_envp)
118 return -ENOMEM;
97 envp[i++] = buffer; 119 envp[i++] = buffer;
98 length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X", 120 buffer += len;
99 cdev->id.dev_model); 121 buffer_size -= len;
100 if ((buffer_size - length <= 0) || (i >= num_envp)) 122
123 /* MODALIAS= */
124 len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1;
125 if (len > buffer_size || i >= num_envp)
101 return -ENOMEM; 126 return -ENOMEM;
127 envp[i++] = buffer;
128 buffer += len;
129 buffer_size -= len;
102 130
103 envp[i] = NULL; 131 envp[i] = NULL;
104 132
@@ -251,16 +279,11 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
251{ 279{
252 struct ccw_device *cdev = to_ccwdev(dev); 280 struct ccw_device *cdev = to_ccwdev(dev);
253 struct ccw_device_id *id = &(cdev->id); 281 struct ccw_device_id *id = &(cdev->id);
254 int ret; 282 int len;
255 283
256 ret = sprintf(buf, "ccw:t%04Xm%02X", 284 len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1;
257 id->cu_type, id->cu_model); 285
258 if (id->dev_type != 0) 286 return len > PAGE_SIZE ? PAGE_SIZE : len;
259 ret += sprintf(buf + ret, "dt%04Xdm%02X\n",
260 id->dev_type, id->dev_model);
261 else
262 ret += sprintf(buf + ret, "dtdm\n");
263 return ret;
264} 287}
265 288
266static ssize_t 289static ssize_t
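
Both ccw_uevent() and modalias_show() now go through snprint_alias(), which keeps plain snprintf semantics: it returns the length the complete string would need even when it did not fit, so callers detect truncation by comparing the result with the buffer size. A self-contained sketch of that convention; build_alias() is a made-up stand-in, not the driver's helper:

#include <stdio.h>

/* Build a "ccw:t<cu_type>m<cu_model>" style string. Returns the length
 * the full string requires, even if it did not fit into buf. */
static int build_alias(char *buf, size_t size,
                       unsigned int cu_type, unsigned int cu_model)
{
        return snprintf(buf, size, "ccw:t%04Xm%02X", cu_type, cu_model);
}

int main(void)
{
        char small[8], big[32];
        int len;

        len = build_alias(small, sizeof(small), 0x3990, 0xe9);
        if (len >= (int) sizeof(small))
                printf("needed %d bytes, buffer too small -> report -ENOMEM\n", len);

        len = build_alias(big, sizeof(big), 0x3990, 0xe9);
        printf("alias: %s (length %d)\n", big, len);
        return 0;
}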
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 35e162ba6d54..dace46fc32e8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -232,10 +232,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
232 */ 232 */
233 old_lpm = sch->lpm; 233 old_lpm = sch->lpm;
234 stsch(sch->schid, &sch->schib); 234 stsch(sch->schid, &sch->schib);
235 sch->lpm = sch->schib.pmcw.pim & 235 sch->lpm = sch->schib.pmcw.pam & sch->opm;
236 sch->schib.pmcw.pam &
237 sch->schib.pmcw.pom &
238 sch->opm;
239 /* Check since device may again have become not operational. */ 236 /* Check since device may again have become not operational. */
240 if (!sch->schib.pmcw.dnv) 237 if (!sch->schib.pmcw.dnv)
241 state = DEV_STATE_NOT_OPER; 238 state = DEV_STATE_NOT_OPER;
@@ -267,6 +264,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
267 notify = 1; 264 notify = 1;
268 } 265 }
269 /* fill out sense information */ 266 /* fill out sense information */
267 memset(&cdev->id, 0, sizeof(cdev->id));
270 cdev->id.cu_type = cdev->private->senseid.cu_type; 268 cdev->id.cu_type = cdev->private->senseid.cu_type;
271 cdev->id.cu_model = cdev->private->senseid.cu_model; 269 cdev->id.cu_model = cdev->private->senseid.cu_model;
272 cdev->id.dev_type = cdev->private->senseid.dev_type; 270 cdev->id.dev_type = cdev->private->senseid.dev_type;
@@ -454,8 +452,8 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
454 return; 452 return;
455 } 453 }
456 /* Start Path Group verification. */ 454 /* Start Path Group verification. */
457 sch->vpm = 0; /* Start with no path groups set. */
458 cdev->private->state = DEV_STATE_VERIFY; 455 cdev->private->state = DEV_STATE_VERIFY;
456 cdev->private->flags.doverify = 0;
459 ccw_device_verify_start(cdev); 457 ccw_device_verify_start(cdev);
460} 458}
461 459
@@ -555,7 +553,19 @@ ccw_device_nopath_notify(void *data)
555void 553void
556ccw_device_verify_done(struct ccw_device *cdev, int err) 554ccw_device_verify_done(struct ccw_device *cdev, int err)
557{ 555{
558 cdev->private->flags.doverify = 0; 556 struct subchannel *sch;
557
558 sch = to_subchannel(cdev->dev.parent);
559 /* Update schib - pom may have changed. */
560 stsch(sch->schid, &sch->schib);
561 /* Update lpm with verified path mask. */
562 sch->lpm = sch->vpm;
563 /* Repeat path verification? */
564 if (cdev->private->flags.doverify) {
565 cdev->private->flags.doverify = 0;
566 ccw_device_verify_start(cdev);
567 return;
568 }
559 switch (err) { 569 switch (err) {
560 case -EOPNOTSUPP: /* path grouping not supported, just set online. */ 570 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
561 cdev->private->options.pgroup = 0; 571 cdev->private->options.pgroup = 0;
@@ -613,6 +623,7 @@ ccw_device_online(struct ccw_device *cdev)
613 if (!cdev->private->options.pgroup) { 623 if (!cdev->private->options.pgroup) {
614 /* Start initial path verification. */ 624 /* Start initial path verification. */
615 cdev->private->state = DEV_STATE_VERIFY; 625 cdev->private->state = DEV_STATE_VERIFY;
626 cdev->private->flags.doverify = 0;
616 ccw_device_verify_start(cdev); 627 ccw_device_verify_start(cdev);
617 return 0; 628 return 0;
618 } 629 }
@@ -659,7 +670,6 @@ ccw_device_offline(struct ccw_device *cdev)
659 /* Are we doing path grouping? */ 670 /* Are we doing path grouping? */
660 if (!cdev->private->options.pgroup) { 671 if (!cdev->private->options.pgroup) {
661 /* No, set state offline immediately. */ 672 /* No, set state offline immediately. */
662 sch->vpm = 0;
663 ccw_device_done(cdev, DEV_STATE_OFFLINE); 673 ccw_device_done(cdev, DEV_STATE_OFFLINE);
664 return 0; 674 return 0;
665 } 675 }
@@ -780,6 +790,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
780 } 790 }
781 /* Device is idle, we can do the path verification. */ 791 /* Device is idle, we can do the path verification. */
782 cdev->private->state = DEV_STATE_VERIFY; 792 cdev->private->state = DEV_STATE_VERIFY;
793 cdev->private->flags.doverify = 0;
783 ccw_device_verify_start(cdev); 794 ccw_device_verify_start(cdev);
784} 795}
785 796
@@ -1042,9 +1053,9 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1042} 1053}
1043 1054
1044static void 1055static void
1045ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event) 1056ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
1046{ 1057{
1047 /* When the I/O has terminated, we have to start verification. */ 1058 /* Start verification after current task finished. */
1048 cdev->private->flags.doverify = 1; 1059 cdev->private->flags.doverify = 1;
1049} 1060}
1050 1061
@@ -1110,10 +1121,7 @@ device_trigger_reprobe(struct subchannel *sch)
1110 * The pim, pam, pom values may not be accurate, but they are the best 1121 * The pim, pam, pom values may not be accurate, but they are the best
1111 * we have before performing device selection :/ 1122 * we have before performing device selection :/
1112 */ 1123 */
1113 sch->lpm = sch->schib.pmcw.pim & 1124 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1114 sch->schib.pmcw.pam &
1115 sch->schib.pmcw.pom &
1116 sch->opm;
1117 /* Re-set some bits in the pmcw that were lost. */ 1125 /* Re-set some bits in the pmcw that were lost. */
1118 sch->schib.pmcw.isc = 3; 1126 sch->schib.pmcw.isc = 3;
1119 sch->schib.pmcw.csense = 1; 1127 sch->schib.pmcw.csense = 1;
@@ -1237,7 +1245,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1237 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1245 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1238 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, 1246 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
1239 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 1247 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1240 [DEV_EVENT_VERIFY] = ccw_device_nop, 1248 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1241 }, 1249 },
1242 [DEV_STATE_ONLINE] = { 1250 [DEV_STATE_ONLINE] = {
1243 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1251 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
@@ -1280,7 +1288,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1280 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1288 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1281 [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq, 1289 [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq,
1282 [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout, 1290 [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout,
1283 [DEV_EVENT_VERIFY] = ccw_device_wait4io_verify, 1291 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1284 }, 1292 },
1285 [DEV_STATE_QUIESCE] = { 1293 [DEV_STATE_QUIESCE] = {
1286 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done, 1294 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
@@ -1293,7 +1301,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1293 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1301 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1294 [DEV_EVENT_INTERRUPT] = ccw_device_start_id, 1302 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1295 [DEV_EVENT_TIMEOUT] = ccw_device_bug, 1303 [DEV_EVENT_TIMEOUT] = ccw_device_bug,
1296 [DEV_EVENT_VERIFY] = ccw_device_nop, 1304 [DEV_EVENT_VERIFY] = ccw_device_start_id,
1297 }, 1305 },
1298 [DEV_STATE_DISCONNECTED_SENSE_ID] = { 1306 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1299 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 1307 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
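
The doverify changes above follow a defer-while-busy pattern: ccw_device_delay_verify() only records that path verification is wanted, and ccw_device_verify_done() re-runs verification if the flag was raised in the meantime. A tiny sketch of the same deferral idea, with invented names:

#include <stdio.h>
#include <stdbool.h>

struct dev {
        bool busy;              /* some I/O or verification in flight */
        bool verify_pending;    /* plays the role of flags.doverify */
};

static void start_verify(struct dev *d)
{
        puts("starting path verification");
        d->busy = true;
}

/* Caller asks for verification; defer it while other work is in flight. */
static void request_verify(struct dev *d)
{
        if (d->busy)
                d->verify_pending = true;
        else
                start_verify(d);
}

/* Current step finished: honour a deferred request before anything else. */
static void work_done(struct dev *d)
{
        d->busy = false;
        if (d->verify_pending) {
                d->verify_pending = false;
                start_verify(d);
        }
}

int main(void)
{
        struct dev d = { .busy = true };

        request_verify(&d);     /* deferred, device is busy */
        work_done(&d);          /* runs the deferred verification now */
        return 0;
}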
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 9e3de0bd59b5..93a897eebfff 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -96,6 +96,12 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
96 ret = cio_set_options (sch, flags); 96 ret = cio_set_options (sch, flags);
97 if (ret) 97 if (ret)
98 return ret; 98 return ret;
 99 /* Adjust requested path mask to exclude varied off paths. */
100 if (lpm) {
101 lpm &= sch->opm;
102 if (lpm == 0)
103 return -EACCES;
104 }
99 ret = cio_start_key (sch, cpa, lpm, key); 105 ret = cio_start_key (sch, cpa, lpm, key);
100 if (ret == 0) 106 if (ret == 0)
101 cdev->private->intparm = intparm; 107 cdev->private->intparm = intparm;
@@ -250,7 +256,7 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
250 if (!sch) 256 if (!sch)
251 return 0; 257 return 0;
252 else 258 else
253 return sch->vpm; 259 return sch->lpm;
254} 260}
255 261
256static void 262static void
@@ -304,7 +310,7 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
304 sch = to_subchannel(cdev->dev.parent); 310 sch = to_subchannel(cdev->dev.parent);
305 do { 311 do {
306 ret = cio_start (sch, ccw, lpm); 312 ret = cio_start (sch, ccw, lpm);
307 if ((ret == -EBUSY) || (ret == -EACCES)) { 313 if (ret == -EBUSY) {
308 /* Try again later. */ 314 /* Try again later. */
309 spin_unlock_irq(&sch->lock); 315 spin_unlock_irq(&sch->lock);
310 msleep(10); 316 msleep(10);
@@ -433,6 +439,13 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
433 if (!ciw || ciw->cmd == 0) 439 if (!ciw || ciw->cmd == 0)
434 return -EOPNOTSUPP; 440 return -EOPNOTSUPP;
435 441
 442 /* Adjust requested path mask to exclude varied off paths. */
443 if (lpm) {
444 lpm &= sch->opm;
445 if (lpm == 0)
446 return -EACCES;
447 }
448
436 rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); 449 rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
437 if (!rcd_ccw) 450 if (!rcd_ccw)
438 return -ENOMEM; 451 return -ENOMEM;
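
The new checks in ccw_device_start_key() and read_conf_data_lpm() intersect the caller-supplied path mask with the operational path mask and fail with -EACCES when nothing usable remains, while a zero mask keeps meaning "no restriction". The arithmetic in isolation (adjust_path_mask() is invented for the sketch):

#include <stdio.h>
#include <errno.h>

/* lpm: paths requested by the caller, opm: paths not varied off. */
static int adjust_path_mask(unsigned char lpm, unsigned char opm)
{
        if (lpm) {
                lpm &= opm;             /* drop varied-off paths */
                if (lpm == 0)
                        return -EACCES; /* none of the requested paths usable */
        }
        return lpm;                     /* 0 passes through: no restriction */
}

int main(void)
{
        /* Caller asks for 0x80|0x40, but 0x40 is varied off. */
        printf("effective mask: 0x%02x\n", adjust_path_mask(0xc0, 0xbf));

        /* Every requested path varied off. */
        if (adjust_path_mask(0x40, 0xbf) == -EACCES)
                puts("request rejected with -EACCES");
        return 0;
}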
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 1693a102dcfe..8ca2d078848c 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -245,18 +245,17 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
245 memset(&cdev->private->irb, 0, sizeof(struct irb)); 245 memset(&cdev->private->irb, 0, sizeof(struct irb));
246 246
247 /* Try multiple times. */ 247 /* Try multiple times. */
248 ret = -ENODEV; 248 ret = -EACCES;
249 if (cdev->private->iretry > 0) { 249 if (cdev->private->iretry > 0) {
250 cdev->private->iretry--; 250 cdev->private->iretry--;
251 ret = cio_start (sch, cdev->private->iccws, 251 ret = cio_start (sch, cdev->private->iccws,
252 cdev->private->imask); 252 cdev->private->imask);
253 /* ret is 0, -EBUSY, -EACCES or -ENODEV */ 253 /* We expect an interrupt in case of success or busy
254 if ((ret != -EACCES) && (ret != -ENODEV)) 254 * indication. */
255 if ((ret == 0) || (ret == -EBUSY))
255 return ret; 256 return ret;
256 } 257 }
257 /* PGID command failed on this path. Switch it off. */ 258 /* PGID command failed on this path. */
258 sch->lpm &= ~cdev->private->imask;
259 sch->vpm &= ~cdev->private->imask;
260 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 259 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
261 "0.%x.%04x, lpm %02X, became 'not operational'\n", 260 "0.%x.%04x, lpm %02X, became 'not operational'\n",
262 cdev->private->devno, sch->schid.ssid, 261 cdev->private->devno, sch->schid.ssid,
@@ -286,18 +285,17 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
286 memset(&cdev->private->irb, 0, sizeof(struct irb)); 285 memset(&cdev->private->irb, 0, sizeof(struct irb));
287 286
288 /* Try multiple times. */ 287 /* Try multiple times. */
289 ret = -ENODEV; 288 ret = -EACCES;
290 if (cdev->private->iretry > 0) { 289 if (cdev->private->iretry > 0) {
291 cdev->private->iretry--; 290 cdev->private->iretry--;
292 ret = cio_start (sch, cdev->private->iccws, 291 ret = cio_start (sch, cdev->private->iccws,
293 cdev->private->imask); 292 cdev->private->imask);
294 /* ret is 0, -EBUSY, -EACCES or -ENODEV */ 293 /* We expect an interrupt in case of success or busy
295 if ((ret != -EACCES) && (ret != -ENODEV)) 294 * indication. */
295 if ((ret == 0) || (ret == -EBUSY))
296 return ret; 296 return ret;
297 } 297 }
298 /* nop command failed on this path. Switch it off. */ 298 /* nop command failed on this path. */
299 sch->lpm &= ~cdev->private->imask;
300 sch->vpm &= ~cdev->private->imask;
301 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " 299 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel "
302 "0.%x.%04x, lpm %02X, became 'not operational'\n", 300 "0.%x.%04x, lpm %02X, became 'not operational'\n",
303 cdev->private->devno, sch->schid.ssid, 301 cdev->private->devno, sch->schid.ssid,
@@ -372,27 +370,32 @@ static void
372__ccw_device_verify_start(struct ccw_device *cdev) 370__ccw_device_verify_start(struct ccw_device *cdev)
373{ 371{
374 struct subchannel *sch; 372 struct subchannel *sch;
375 __u8 imask, func; 373 __u8 func;
376 int ret; 374 int ret;
377 375
378 sch = to_subchannel(cdev->dev.parent); 376 sch = to_subchannel(cdev->dev.parent);
379 while (sch->vpm != sch->lpm) { 377 /* Repeat for all paths. */
380 /* Find first unequal bit in vpm vs. lpm */ 378 for (; cdev->private->imask; cdev->private->imask >>= 1,
381 for (imask = 0x80; imask != 0; imask >>= 1) 379 cdev->private->iretry = 5) {
382 if ((sch->vpm & imask) != (sch->lpm & imask)) 380 if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
383 break; 381 /* Path not available, try next. */
384 cdev->private->imask = imask; 382 continue;
385 if (cdev->private->options.pgroup) { 383 if (cdev->private->options.pgroup) {
386 func = (sch->vpm & imask) ? 384 if (sch->opm & cdev->private->imask)
387 SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH; 385 func = SPID_FUNC_ESTABLISH;
386 else
387 func = SPID_FUNC_RESIGN;
388 ret = __ccw_device_do_pgid(cdev, func); 388 ret = __ccw_device_do_pgid(cdev, func);
389 } else 389 } else
390 ret = __ccw_device_do_nop(cdev); 390 ret = __ccw_device_do_nop(cdev);
391 /* We expect an interrupt in case of success or busy
392 * indication. */
391 if (ret == 0 || ret == -EBUSY) 393 if (ret == 0 || ret == -EBUSY)
392 return; 394 return;
393 cdev->private->iretry = 5; 395 /* Permanent path failure, try next. */
394 } 396 }
395 ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); 397 /* Done with all paths. */
398 ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
396} 399}
397 400
398/* 401/*
@@ -421,14 +424,14 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
421 else 424 else
422 ret = __ccw_device_check_nop(cdev); 425 ret = __ccw_device_check_nop(cdev);
423 memset(&cdev->private->irb, 0, sizeof(struct irb)); 426 memset(&cdev->private->irb, 0, sizeof(struct irb));
427
424 switch (ret) { 428 switch (ret) {
425 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ 429 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
426 case 0: 430 case 0:
427 /* Establish or Resign Path Group done. Update vpm. */ 431 /* Path verification ccw finished successfully, update lpm. */
428 if ((sch->lpm & cdev->private->imask) != 0) 432 sch->vpm |= sch->opm & cdev->private->imask;
429 sch->vpm |= cdev->private->imask; 433 /* Go on with next path. */
430 else 434 cdev->private->imask >>= 1;
431 sch->vpm &= ~cdev->private->imask;
432 cdev->private->iretry = 5; 435 cdev->private->iretry = 5;
433 __ccw_device_verify_start(cdev); 436 __ccw_device_verify_start(cdev);
434 break; 437 break;
@@ -441,6 +444,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
441 cdev->private->options.pgroup = 0; 444 cdev->private->options.pgroup = 0;
442 else 445 else
443 cdev->private->flags.pgid_single = 1; 446 cdev->private->flags.pgid_single = 1;
447 /* Retry */
448 sch->vpm = 0;
449 cdev->private->imask = 0x80;
450 cdev->private->iretry = 5;
444 /* fall through. */ 451 /* fall through. */
445 case -EAGAIN: /* Try again. */ 452 case -EAGAIN: /* Try again. */
446 __ccw_device_verify_start(cdev); 453 __ccw_device_verify_start(cdev);
@@ -449,8 +456,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
449 ccw_device_verify_done(cdev, -ETIME); 456 ccw_device_verify_done(cdev, -ETIME);
450 break; 457 break;
451 case -EACCES: /* channel is not operational. */ 458 case -EACCES: /* channel is not operational. */
452 sch->lpm &= ~cdev->private->imask; 459 cdev->private->imask >>= 1;
453 sch->vpm &= ~cdev->private->imask;
454 cdev->private->iretry = 5; 460 cdev->private->iretry = 5;
455 __ccw_device_verify_start(cdev); 461 __ccw_device_verify_start(cdev);
456 break; 462 break;
@@ -463,19 +469,17 @@ ccw_device_verify_start(struct ccw_device *cdev)
463 struct subchannel *sch = to_subchannel(cdev->dev.parent); 469 struct subchannel *sch = to_subchannel(cdev->dev.parent);
464 470
465 cdev->private->flags.pgid_single = 0; 471 cdev->private->flags.pgid_single = 0;
472 cdev->private->imask = 0x80;
466 cdev->private->iretry = 5; 473 cdev->private->iretry = 5;
467 /* 474
468 * Update sch->lpm with current values to catch paths becoming 475 /* Start with empty vpm. */
469 * available again. 476 sch->vpm = 0;
470 */ 477
478 /* Get current pam. */
471 if (stsch(sch->schid, &sch->schib)) { 479 if (stsch(sch->schid, &sch->schib)) {
472 ccw_device_verify_done(cdev, -ENODEV); 480 ccw_device_verify_done(cdev, -ENODEV);
473 return; 481 return;
474 } 482 }
475 sch->lpm = sch->schib.pmcw.pim &
476 sch->schib.pmcw.pam &
477 sch->schib.pmcw.pom &
478 sch->opm;
479 __ccw_device_verify_start(cdev); 483 __ccw_device_verify_start(cdev);
480} 484}
481 485
@@ -524,7 +528,6 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
524 switch (ret) { 528 switch (ret) {
525 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ 529 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
526 case 0: /* disband successful. */ 530 case 0: /* disband successful. */
527 sch->vpm = 0;
528 ccw_device_disband_done(cdev, ret); 531 ccw_device_disband_done(cdev, ret);
529 break; 532 break;
530 case -EOPNOTSUPP: 533 case -EOPNOTSUPP:
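
__ccw_device_verify_start() now iterates all eight path bits itself, starting at 0x80 and shifting right, skipping bits that are not available and collecting successfully verified bits in vpm. Stripped of the channel I/O, the loop shape is roughly the following; verify_one_path() is a placeholder for the SPID/NOP round trip:

#include <stdio.h>

/* Pretend verification: a path verifies iff it is also operational. */
static int verify_one_path(unsigned char imask, unsigned char opm)
{
        return (imask & opm) != 0;
}

int main(void)
{
        unsigned char pam = 0xe1;       /* paths available in hardware */
        unsigned char opm = 0xc1;       /* paths not varied off */
        unsigned char vpm = 0;          /* verified path mask being built */
        unsigned char imask;

        for (imask = 0x80; imask; imask >>= 1) {
                if (!(imask & pam))
                        continue;               /* path not available, next */
                if (verify_one_path(imask, opm))
                        vpm |= imask;           /* path verified, remember it */
        }
        printf("vpm = 0x%02x\n", vpm);          /* 0xc1 here */
        return 0;
}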
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 7c93a8798d23..cde822d8b5c8 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -115,7 +115,7 @@ qdio_min(int a,int b)
115static inline __u64 115static inline __u64
116qdio_get_micros(void) 116qdio_get_micros(void)
117{ 117{
118 return (get_clock() >> 10); /* time>>12 is microseconds */ 118 return (get_clock() >> 12); /* time>>12 is microseconds */
119} 119}
120 120
121/* 121/*
@@ -1129,7 +1129,7 @@ out:
1129 1129
1130#ifdef QDIO_USE_PROCESSING_STATE 1130#ifdef QDIO_USE_PROCESSING_STATE
1131 if (last_position>=0) 1131 if (last_position>=0)
1132 set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count); 1132 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1133#endif /* QDIO_USE_PROCESSING_STATE */ 1133#endif /* QDIO_USE_PROCESSING_STATE */
1134 1134
1135 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); 1135 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
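
The qdio_get_micros() fix rests on the TOD clock format: get_clock() counts in units of 2^-12 microseconds (bit 51 ticks once per microsecond), so shifting right by 12 yields microseconds, which is what the comment always claimed. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
        /* 2500 microseconds worth of TOD ticks (4096 ticks per microsecond). */
        unsigned long long tod = 2500ULL * 4096;

        printf("us via >>12: %llu\n", tod >> 12);  /* 2500, correct */
        printf("us via >>10: %llu\n", tod >> 10);  /* 10000, four times too big */
        return 0;
}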
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ceb3ab31ee08..124569362f02 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -191,49 +191,49 @@ enum qdio_irq_states {
191#if QDIO_VERBOSE_LEVEL>8 191#if QDIO_VERBOSE_LEVEL>8
192#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) 192#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
193#else 193#else
194#define QDIO_PRINT_STUPID(x...) 194#define QDIO_PRINT_STUPID(x...) do { } while (0)
195#endif 195#endif
196 196
197#if QDIO_VERBOSE_LEVEL>7 197#if QDIO_VERBOSE_LEVEL>7
198#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) 198#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
199#else 199#else
200#define QDIO_PRINT_ALL(x...) 200#define QDIO_PRINT_ALL(x...) do { } while (0)
201#endif 201#endif
202 202
203#if QDIO_VERBOSE_LEVEL>6 203#if QDIO_VERBOSE_LEVEL>6
204#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) 204#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
205#else 205#else
206#define QDIO_PRINT_INFO(x...) 206#define QDIO_PRINT_INFO(x...) do { } while (0)
207#endif 207#endif
208 208
209#if QDIO_VERBOSE_LEVEL>5 209#if QDIO_VERBOSE_LEVEL>5
210#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) 210#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
211#else 211#else
212#define QDIO_PRINT_WARN(x...) 212#define QDIO_PRINT_WARN(x...) do { } while (0)
213#endif 213#endif
214 214
215#if QDIO_VERBOSE_LEVEL>4 215#if QDIO_VERBOSE_LEVEL>4
216#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) 216#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
217#else 217#else
218#define QDIO_PRINT_ERR(x...) 218#define QDIO_PRINT_ERR(x...) do { } while (0)
219#endif 219#endif
220 220
221#if QDIO_VERBOSE_LEVEL>3 221#if QDIO_VERBOSE_LEVEL>3
222#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) 222#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
223#else 223#else
224#define QDIO_PRINT_CRIT(x...) 224#define QDIO_PRINT_CRIT(x...) do { } while (0)
225#endif 225#endif
226 226
227#if QDIO_VERBOSE_LEVEL>2 227#if QDIO_VERBOSE_LEVEL>2
228#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) 228#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
229#else 229#else
230#define QDIO_PRINT_ALERT(x...) 230#define QDIO_PRINT_ALERT(x...) do { } while (0)
231#endif 231#endif
232 232
233#if QDIO_VERBOSE_LEVEL>1 233#if QDIO_VERBOSE_LEVEL>1
234#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) 234#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
235#else 235#else
236#define QDIO_PRINT_EMERG(x...) 236#define QDIO_PRINT_EMERG(x...) do { } while (0)
237#endif 237#endif
238 238
239#define HEXDUMP16(importance,header,ptr) \ 239#define HEXDUMP16(importance,header,ptr) \
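
Giving the disabled QDIO_PRINT_* macros a do { } while (0) body keeps every call site a single well-formed statement that still requires its trailing semicolon, instead of expanding to nothing and leaving an empty statement behind inside if/else constructs. A compact illustration with generic macro names:

#include <stdio.h>

#define LOG_EMPTY(...)                          /* expands to nothing */
#define LOG_SAFE(...)   do { } while (0)        /* no-op, but one statement */

int main(void)
{
        int err = 1;

        /* With LOG_EMPTY the body of the 'if' below would collapse to a
         * bare ';', which compilers commonly flag as a suspicious empty
         * statement. The do/while(0) form stays a normal statement: */
        if (err)
                LOG_SAFE("error %d", err);
        else
                puts("no error");

        return 0;
}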
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 15edebbead7f..f0a12d2eb780 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,5 +2,16 @@
2# S/390 crypto devices 2# S/390 crypto devices
3# 3#
4 4
5z90crypt-objs := z90main.o z90hardware.o 5ifdef CONFIG_ZCRYPT_MONOLITHIC
6obj-$(CONFIG_Z90CRYPT) += z90crypt.o 6
7z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \
8 zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o
9obj-$(CONFIG_ZCRYPT) += z90crypt.o
10
11else
12
13ap-objs := ap_bus.o
14obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
15obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
16
17endif
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
new file mode 100644
index 000000000000..6ed0985c0c91
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.c
@@ -0,0 +1,1221 @@
1/*
2 * linux/drivers/s390/crypto/ap_bus.c
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30#include <linux/interrupt.h>
31#include <linux/workqueue.h>
32#include <linux/notifier.h>
33#include <linux/kthread.h>
34#include <linux/mutex.h>
35#include <asm/s390_rdev.h>
36
37#include "ap_bus.h"
38
39/* Some prototypes. */
40static void ap_scan_bus(void *);
41static void ap_poll_all(unsigned long);
42static void ap_poll_timeout(unsigned long);
43static int ap_poll_thread_start(void);
44static void ap_poll_thread_stop(void);
45
46/**
47 * Module description.
48 */
49MODULE_AUTHOR("IBM Corporation");
50MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
51 "Copyright 2006 IBM Corporation");
52MODULE_LICENSE("GPL");
53
54/**
55 * Module parameter
56 */
57int ap_domain_index = -1; /* Adjunct Processor Domain Index */
58module_param_named(domain, ap_domain_index, int, 0000);
59MODULE_PARM_DESC(domain, "domain index for ap devices");
60EXPORT_SYMBOL(ap_domain_index);
61
62static int ap_thread_flag = 1;
63module_param_named(poll_thread, ap_thread_flag, int, 0000);
64MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on).");
65
66static struct device *ap_root_device = NULL;
67
68/**
69 * Workqueue & timer for bus rescan.
70 */
71static struct workqueue_struct *ap_work_queue;
72static struct timer_list ap_config_timer;
73static int ap_config_time = AP_CONFIG_TIME;
74static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
75
76/**
77 * Tasklet & timer for AP request polling.
78 */
79static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
80static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
81static atomic_t ap_poll_requests = ATOMIC_INIT(0);
82static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
83static struct task_struct *ap_poll_kthread = NULL;
84static DEFINE_MUTEX(ap_poll_thread_mutex);
85
86/**
87 * Test if ap instructions are available.
88 *
89 * Returns 0 if the ap instructions are installed.
90 */
91static inline int ap_instructions_available(void)
92{
93 register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
94 register unsigned long reg1 asm ("1") = -ENODEV;
95 register unsigned long reg2 asm ("2") = 0UL;
96
97 asm volatile(
98 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
99 "0: la %1,0\n"
100 "1:\n"
101 EX_TABLE(0b, 1b)
102 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
103 return reg1;
104}
105
106/**
107 * Test adjunct processor queue.
108 * @qid: the ap queue number
109 * @queue_depth: pointer to queue depth value
110 * @device_type: pointer to device type value
111 *
112 * Returns ap queue status structure.
113 */
114static inline struct ap_queue_status
115ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
116{
117 register unsigned long reg0 asm ("0") = qid;
118 register struct ap_queue_status reg1 asm ("1");
119 register unsigned long reg2 asm ("2") = 0UL;
120
121 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
122 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
123 *device_type = (int) (reg2 >> 24);
124 *queue_depth = (int) (reg2 & 0xff);
125 return reg1;
126}
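
ap_test_queue() hands back two values packed into one register: the shifts above take the device type from the bits at position 24 and up and the queue depth from the lowest byte. The same unpacking on a made-up sample word:

#include <stdio.h>

/* Mirror of the shift/mask used after the TAPQ above. */
static void decode_tapq(unsigned long reg2, int *device_type, int *queue_depth)
{
        *device_type = (int) (reg2 >> 24);
        *queue_depth = (int) (reg2 & 0xff);
}

int main(void)
{
        int type, depth;

        decode_tapq(0x06000008UL, &type, &depth);       /* sample value only */
        printf("device type %d, queue depth %d\n", type, depth);  /* 6 and 8 */
        return 0;
}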
127
128/**
129 * Reset adjunct processor queue.
130 * @qid: the ap queue number
131 *
132 * Returns ap queue status structure.
133 */
134static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
135{
136 register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
137 register struct ap_queue_status reg1 asm ("1");
138 register unsigned long reg2 asm ("2") = 0UL;
139
140 asm volatile(
141 ".long 0xb2af0000" /* PQAP(RAPQ) */
142 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
143 return reg1;
144}
145
146/**
147 * Send message to adjunct processor queue.
148 * @qid: the ap queue number
149 * @psmid: the program supplied message identifier
150 * @msg: the message text
151 * @length: the message length
152 *
153 * Returns ap queue status structure.
154 *
155 * Condition code 1 on NQAP can't happen because the L bit is 1.
156 *
157 * Condition code 2 on NQAP also means the send is incomplete,
158 * because a segment boundary was reached. The NQAP is repeated.
159 */
160static inline struct ap_queue_status
161__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
162{
163 typedef struct { char _[length]; } msgblock;
164 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
165 register struct ap_queue_status reg1 asm ("1");
166 register unsigned long reg2 asm ("2") = (unsigned long) msg;
167 register unsigned long reg3 asm ("3") = (unsigned long) length;
168 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
169 register unsigned long reg5 asm ("5") = (unsigned int) psmid;
170
171 asm volatile (
 172 "0: .long 0xb2ad0042\n"	/* NQAP */
173 " brc 2,0b"
174 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
175 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
176 : "cc" );
177 return reg1;
178}
179
180int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
181{
182 struct ap_queue_status status;
183
184 status = __ap_send(qid, psmid, msg, length);
185 switch (status.response_code) {
186 case AP_RESPONSE_NORMAL:
187 return 0;
188 case AP_RESPONSE_Q_FULL:
189 return -EBUSY;
190 default: /* Device is gone. */
191 return -ENODEV;
192 }
193}
194EXPORT_SYMBOL(ap_send);
195
196/*
197 * Receive message from adjunct processor queue.
198 * @qid: the ap queue number
199 * @psmid: pointer to program supplied message identifier
200 * @msg: the message text
201 * @length: the message length
202 *
203 * Returns ap queue status structure.
204 *
205 * Condition code 1 on DQAP means the receive has taken place
206 * but only partially. The response is incomplete, hence the
207 * DQAP is repeated.
208 *
209 * Condition code 2 on DQAP also means the receive is incomplete,
210 * this time because a segment boundary was reached. Again, the
211 * DQAP is repeated.
212 *
213 * Note that gpr2 is used by the DQAP instruction to keep track of
214 * any 'residual' length, in case the instruction gets interrupted.
215 * Hence it gets zeroed before the instruction.
216 */
217static inline struct ap_queue_status
218__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
219{
220 typedef struct { char _[length]; } msgblock;
221 register unsigned long reg0 asm("0") = qid | 0x80000000UL;
222 register struct ap_queue_status reg1 asm ("1");
223 register unsigned long reg2 asm("2") = 0UL;
224 register unsigned long reg4 asm("4") = (unsigned long) msg;
225 register unsigned long reg5 asm("5") = (unsigned long) length;
226 register unsigned long reg6 asm("6") = 0UL;
227 register unsigned long reg7 asm("7") = 0UL;
228
229
230 asm volatile(
231 "0: .long 0xb2ae0064\n"
232 " brc 6,0b\n"
233 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
234 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
235 "=m" (*(msgblock *) msg) : : "cc" );
236 *psmid = (((unsigned long long) reg6) << 32) + reg7;
237 return reg1;
238}
239
240int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
241{
242 struct ap_queue_status status;
243
244 status = __ap_recv(qid, psmid, msg, length);
245 switch (status.response_code) {
246 case AP_RESPONSE_NORMAL:
247 return 0;
248 case AP_RESPONSE_NO_PENDING_REPLY:
249 if (status.queue_empty)
250 return -ENOENT;
251 return -EBUSY;
252 default:
253 return -ENODEV;
254 }
255}
256EXPORT_SYMBOL(ap_recv);
257
258/**
 259 * Check if an AP queue is available. The test is repeated up to
 260 * AP_MAX_RESET times.
261 * @qid: the ap queue number
262 * @queue_depth: pointer to queue depth value
263 * @device_type: pointer to device type value
264 */
265static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
266{
267 struct ap_queue_status status;
268 int t_depth, t_device_type, rc, i;
269
270 rc = -EBUSY;
271 for (i = 0; i < AP_MAX_RESET; i++) {
272 status = ap_test_queue(qid, &t_depth, &t_device_type);
273 switch (status.response_code) {
274 case AP_RESPONSE_NORMAL:
275 *queue_depth = t_depth + 1;
276 *device_type = t_device_type;
277 rc = 0;
278 break;
279 case AP_RESPONSE_Q_NOT_AVAIL:
280 rc = -ENODEV;
281 break;
282 case AP_RESPONSE_RESET_IN_PROGRESS:
283 break;
284 case AP_RESPONSE_DECONFIGURED:
285 rc = -ENODEV;
286 break;
287 case AP_RESPONSE_CHECKSTOPPED:
288 rc = -ENODEV;
289 break;
290 case AP_RESPONSE_BUSY:
291 break;
292 default:
293 BUG();
294 }
295 if (rc != -EBUSY)
296 break;
297 if (i < AP_MAX_RESET - 1)
298 udelay(5);
299 }
300 return rc;
301}
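
ap_query_queue() treats "busy" and "reset in progress" as transient and everything else as final, retries up to AP_MAX_RESET times, and only delays between attempts, never after the last one. The same bounded-retry skeleton in plain C; MAX_RESET, probe_once() and usleep() are stand-ins for the driver's constant and for udelay():

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#define MAX_RESET 6     /* stand-in; the real AP_MAX_RESET lives in ap_bus.h */

enum resp { RESP_NORMAL, RESP_BUSY, RESP_GONE };

/* Fake probe: busy twice, then answers normally. */
static enum resp probe_once(void)
{
        static int calls;
        return ++calls < 3 ? RESP_BUSY : RESP_NORMAL;
}

static int query_with_retry(void)
{
        int rc = -EBUSY;
        int i;

        for (i = 0; i < MAX_RESET; i++) {
                switch (probe_once()) {
                case RESP_NORMAL:
                        rc = 0;
                        break;
                case RESP_GONE:
                        rc = -ENODEV;
                        break;
                case RESP_BUSY:
                        break;          /* transient, keep rc == -EBUSY */
                }
                if (rc != -EBUSY)
                        break;          /* final answer reached */
                if (i < MAX_RESET - 1)
                        usleep(5);      /* brief pause between attempts */
        }
        return rc;
}

int main(void)
{
        printf("query rc = %d\n", query_with_retry());
        return 0;
}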
302
303/**
304 * Reset an AP queue and wait for it to become available again.
305 * @qid: the ap queue number
306 */
307static int ap_init_queue(ap_qid_t qid)
308{
309 struct ap_queue_status status;
310 int rc, dummy, i;
311
312 rc = -ENODEV;
313 status = ap_reset_queue(qid);
314 for (i = 0; i < AP_MAX_RESET; i++) {
315 switch (status.response_code) {
316 case AP_RESPONSE_NORMAL:
317 if (status.queue_empty)
318 rc = 0;
319 break;
320 case AP_RESPONSE_Q_NOT_AVAIL:
321 case AP_RESPONSE_DECONFIGURED:
322 case AP_RESPONSE_CHECKSTOPPED:
323 i = AP_MAX_RESET; /* return with -ENODEV */
324 break;
325 case AP_RESPONSE_RESET_IN_PROGRESS:
326 case AP_RESPONSE_BUSY:
327 default:
328 break;
329 }
330 if (rc != -ENODEV)
331 break;
332 if (i < AP_MAX_RESET - 1) {
333 udelay(5);
334 status = ap_test_queue(qid, &dummy, &dummy);
335 }
336 }
337 return rc;
338}
339
340/**
341 * AP device related attributes.
342 */
343static ssize_t ap_hwtype_show(struct device *dev,
344 struct device_attribute *attr, char *buf)
345{
346 struct ap_device *ap_dev = to_ap_dev(dev);
347 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
348}
349static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
350
351static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
352 char *buf)
353{
354 struct ap_device *ap_dev = to_ap_dev(dev);
355 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
356}
357static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
358
359static ssize_t ap_request_count_show(struct device *dev,
360 struct device_attribute *attr,
361 char *buf)
362{
363 struct ap_device *ap_dev = to_ap_dev(dev);
364 int rc;
365
366 spin_lock_bh(&ap_dev->lock);
367 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
368 spin_unlock_bh(&ap_dev->lock);
369 return rc;
370}
371
372static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
373
374static ssize_t ap_modalias_show(struct device *dev,
375 struct device_attribute *attr, char *buf)
376{
377 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
378}
379
380static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
381
382static struct attribute *ap_dev_attrs[] = {
383 &dev_attr_hwtype.attr,
384 &dev_attr_depth.attr,
385 &dev_attr_request_count.attr,
386 &dev_attr_modalias.attr,
387 NULL
388};
389static struct attribute_group ap_dev_attr_group = {
390 .attrs = ap_dev_attrs
391};
392
393/**
394 * AP bus driver registration/unregistration.
395 */
396static int ap_bus_match(struct device *dev, struct device_driver *drv)
397{
398 struct ap_device *ap_dev = to_ap_dev(dev);
399 struct ap_driver *ap_drv = to_ap_drv(drv);
400 struct ap_device_id *id;
401
402 /**
403 * Compare device type of the device with the list of
404 * supported types of the device_driver.
405 */
406 for (id = ap_drv->ids; id->match_flags; id++) {
407 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
408 (id->dev_type != ap_dev->device_type))
409 continue;
410 return 1;
411 }
412 return 0;
413}
414
415/**
416 * uevent function for AP devices. It sets up a single environment
417 * variable DEV_TYPE which contains the hardware device type.
418 */
419static int ap_uevent (struct device *dev, char **envp, int num_envp,
420 char *buffer, int buffer_size)
421{
422 struct ap_device *ap_dev = to_ap_dev(dev);
423 int length;
424
425 if (!ap_dev)
426 return -ENODEV;
427
428 /* Set up DEV_TYPE environment variable. */
429 envp[0] = buffer;
430 length = scnprintf(buffer, buffer_size, "DEV_TYPE=%04X",
431 ap_dev->device_type);
432 if (buffer_size - length <= 0)
433 return -ENOMEM;
434 envp[1] = 0;
435 return 0;
436}
437
438static struct bus_type ap_bus_type = {
439 .name = "ap",
440 .match = &ap_bus_match,
441 .uevent = &ap_uevent,
442};
443
444static int ap_device_probe(struct device *dev)
445{
446 struct ap_device *ap_dev = to_ap_dev(dev);
447 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
448 int rc;
449
450 ap_dev->drv = ap_drv;
451 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
452 if (rc)
453 ap_dev->unregistered = 1;
454 return rc;
455}
456
457/**
458 * Flush all requests from the request/pending queue of an AP device.
459 * @ap_dev: pointer to the AP device.
460 */
461static inline void __ap_flush_queue(struct ap_device *ap_dev)
462{
463 struct ap_message *ap_msg, *next;
464
465 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
466 list_del_init(&ap_msg->list);
467 ap_dev->pendingq_count--;
468 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
469 }
470 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
471 list_del_init(&ap_msg->list);
472 ap_dev->requestq_count--;
473 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
474 }
475}
476
477void ap_flush_queue(struct ap_device *ap_dev)
478{
479 spin_lock_bh(&ap_dev->lock);
480 __ap_flush_queue(ap_dev);
481 spin_unlock_bh(&ap_dev->lock);
482}
483EXPORT_SYMBOL(ap_flush_queue);
484
485static int ap_device_remove(struct device *dev)
486{
487 struct ap_device *ap_dev = to_ap_dev(dev);
488 struct ap_driver *ap_drv = ap_dev->drv;
489
490 spin_lock_bh(&ap_dev->lock);
491 __ap_flush_queue(ap_dev);
492 /**
493 * set ->unregistered to 1 while holding the lock. This prevents
 494 * new messages from being put on the queue from now on.
495 */
496 ap_dev->unregistered = 1;
497 spin_unlock_bh(&ap_dev->lock);
498 if (ap_drv->remove)
499 ap_drv->remove(ap_dev);
500 return 0;
501}
502
503int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
504 char *name)
505{
506 struct device_driver *drv = &ap_drv->driver;
507
508 drv->bus = &ap_bus_type;
509 drv->probe = ap_device_probe;
510 drv->remove = ap_device_remove;
511 drv->owner = owner;
512 drv->name = name;
513 return driver_register(drv);
514}
515EXPORT_SYMBOL(ap_driver_register);
516
517void ap_driver_unregister(struct ap_driver *ap_drv)
518{
519 driver_unregister(&ap_drv->driver);
520}
521EXPORT_SYMBOL(ap_driver_unregister);
522
523/**
524 * AP bus attributes.
525 */
526static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
527{
528 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
529}
530
531static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
532
533static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
534{
535 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
536}
537
538static ssize_t ap_config_time_store(struct bus_type *bus,
539 const char *buf, size_t count)
540{
541 int time;
542
543 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
544 return -EINVAL;
545 ap_config_time = time;
546 if (!timer_pending(&ap_config_timer) ||
547 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
548 ap_config_timer.expires = jiffies + ap_config_time * HZ;
549 add_timer(&ap_config_timer);
550 }
551 return count;
552}
553
554static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
555
556static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
557{
558 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
559}
560
561static ssize_t ap_poll_thread_store(struct bus_type *bus,
562 const char *buf, size_t count)
563{
564 int flag, rc;
565
566 if (sscanf(buf, "%d\n", &flag) != 1)
567 return -EINVAL;
568 if (flag) {
569 rc = ap_poll_thread_start();
570 if (rc)
571 return rc;
572 }
573 else
574 ap_poll_thread_stop();
575 return count;
576}
577
578static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
579
580static struct bus_attribute *const ap_bus_attrs[] = {
581 &bus_attr_ap_domain,
582 &bus_attr_config_time,
583 &bus_attr_poll_thread,
584 NULL
585};
586
587/**
588 * Pick one of the 16 ap domains.
589 */
590static inline int ap_select_domain(void)
591{
592 int queue_depth, device_type, count, max_count, best_domain;
593 int rc, i, j;
594
595 /**
596 * We want to use a single domain. Either the one specified with
597 * the "domain=" parameter or the domain with the maximum number
598 * of devices.
599 */
600 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
601 /* Domain has already been selected. */
602 return 0;
603 best_domain = -1;
604 max_count = 0;
605 for (i = 0; i < AP_DOMAINS; i++) {
606 count = 0;
607 for (j = 0; j < AP_DEVICES; j++) {
608 ap_qid_t qid = AP_MKQID(j, i);
609 rc = ap_query_queue(qid, &queue_depth, &device_type);
610 if (rc)
611 continue;
612 count++;
613 }
614 if (count > max_count) {
615 max_count = count;
616 best_domain = i;
617 }
618 }
619 if (best_domain >= 0){
620 ap_domain_index = best_domain;
621 return 0;
622 }
623 return -ENODEV;
624}
625
626/**
627 * Find the device type if query queue returned a device type of 0.
628 * @ap_dev: pointer to the AP device.
629 */
630static int ap_probe_device_type(struct ap_device *ap_dev)
631{
632 static unsigned char msg[] = {
633 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
634 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
635 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
636 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
637 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
638 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
639 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
640 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
641 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
642 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
643 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
644 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
645 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
646 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
647 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
648 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
649 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
650 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
651 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
652 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
653 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
654 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
655 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
656 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
657 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
658 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
659 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
660 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
661 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
662 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
663 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
664 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
665 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
666 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
667 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
668 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
669 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
670 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
671 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
672 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
673 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
674 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
675 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
676 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
677 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
678 };
679 struct ap_queue_status status;
680 unsigned long long psmid;
681 char *reply;
682 int rc, i;
683
684 reply = (void *) get_zeroed_page(GFP_KERNEL);
685 if (!reply) {
686 rc = -ENOMEM;
687 goto out;
688 }
689
690 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
691 msg, sizeof(msg));
692 if (status.response_code != AP_RESPONSE_NORMAL) {
693 rc = -ENODEV;
694 goto out_free;
695 }
696
697 /* Wait for the test message to complete. */
698 for (i = 0; i < 6; i++) {
699 mdelay(300);
700 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
701 if (status.response_code == AP_RESPONSE_NORMAL &&
702 psmid == 0x0102030405060708ULL)
703 break;
704 }
705 if (i < 6) {
706 /* Got an answer. */
707 if (reply[0] == 0x00 && reply[1] == 0x86)
708 ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
709 else
710 ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
711 rc = 0;
712 } else
713 rc = -ENODEV;
714
715out_free:
716 free_page((unsigned long) reply);
717out:
718 return rc;
719}
720
721/**
722 * Scan the ap bus for new devices.
723 */
724static int __ap_scan_bus(struct device *dev, void *data)
725{
726 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
727}
728
729static void ap_device_release(struct device *dev)
730{
731 struct ap_device *ap_dev = to_ap_dev(dev);
732
733 kfree(ap_dev);
734}
735
736static void ap_scan_bus(void *data)
737{
738 struct ap_device *ap_dev;
739 struct device *dev;
740 ap_qid_t qid;
741 int queue_depth, device_type;
742 int rc, i;
743
744 if (ap_select_domain() != 0)
745 return;
746 for (i = 0; i < AP_DEVICES; i++) {
747 qid = AP_MKQID(i, ap_domain_index);
748 dev = bus_find_device(&ap_bus_type, NULL,
749 (void *)(unsigned long)qid,
750 __ap_scan_bus);
751 if (dev) {
752 put_device(dev);
753 continue;
754 }
755 rc = ap_query_queue(qid, &queue_depth, &device_type);
756 if (rc)
757 continue;
758 rc = ap_init_queue(qid);
759 if (rc)
760 continue;
761 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
762 if (!ap_dev)
763 break;
764 ap_dev->qid = qid;
765 ap_dev->queue_depth = queue_depth;
766 spin_lock_init(&ap_dev->lock);
767 INIT_LIST_HEAD(&ap_dev->pendingq);
768 INIT_LIST_HEAD(&ap_dev->requestq);
769 if (device_type == 0)
770 ap_probe_device_type(ap_dev);
771 else
772 ap_dev->device_type = device_type;
773
774 ap_dev->device.bus = &ap_bus_type;
775 ap_dev->device.parent = ap_root_device;
776 snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
777 AP_QID_DEVICE(ap_dev->qid));
778 ap_dev->device.release = ap_device_release;
779 rc = device_register(&ap_dev->device);
780 if (rc) {
781 kfree(ap_dev);
782 continue;
783 }
784 /* Add device attributes. */
785 rc = sysfs_create_group(&ap_dev->device.kobj,
786 &ap_dev_attr_group);
787 if (rc)
788 device_unregister(&ap_dev->device);
789 }
790}
791
792static void
793ap_config_timeout(unsigned long ptr)
794{
795 queue_work(ap_work_queue, &ap_config_work);
796 ap_config_timer.expires = jiffies + ap_config_time * HZ;
797 add_timer(&ap_config_timer);
798}
799
800/**
801 * Set up the timer to run the poll tasklet
802 */
803static inline void ap_schedule_poll_timer(void)
804{
805 if (timer_pending(&ap_poll_timer))
806 return;
807 mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
808}
809
810/**
811 * Receive pending reply messages from an AP device.
812 * @ap_dev: pointer to the AP device
813 * @flags: pointer to control flags, bit 2^0 is set if another poll is
814 * required, bit 2^1 is set if the poll timer needs to get armed
815 * Returns 0 if the device is still present, -ENODEV if not.
816 */
817static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
818{
819 struct ap_queue_status status;
820 struct ap_message *ap_msg;
821
822 if (ap_dev->queue_count <= 0)
823 return 0;
824 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
825 ap_dev->reply->message, ap_dev->reply->length);
826 switch (status.response_code) {
827 case AP_RESPONSE_NORMAL:
828 atomic_dec(&ap_poll_requests);
829 ap_dev->queue_count--;
830 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
831 if (ap_msg->psmid != ap_dev->reply->psmid)
832 continue;
833 list_del_init(&ap_msg->list);
834 ap_dev->pendingq_count--;
835 ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
836 break;
837 }
838 if (ap_dev->queue_count > 0)
839 *flags |= 1;
840 break;
841 case AP_RESPONSE_NO_PENDING_REPLY:
842 if (status.queue_empty) {
843 /* The card shouldn't forget requests but who knows. */
844 ap_dev->queue_count = 0;
845 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
846 ap_dev->requestq_count += ap_dev->pendingq_count;
847 ap_dev->pendingq_count = 0;
848 } else
849 *flags |= 2;
850 break;
851 default:
852 return -ENODEV;
853 }
854 return 0;
855}
856
857/**
858 * Send messages from the request queue to an AP device.
859 * @ap_dev: pointer to the AP device
860 * @flags: pointer to control flags, bit 2^0 is set if another poll is
861 * required, bit 2^1 is set if the poll timer needs to get armed
862 * Returns 0 if the device is still present, -ENODEV if not.
863 */
864static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
865{
866 struct ap_queue_status status;
867 struct ap_message *ap_msg;
868
869 if (ap_dev->requestq_count <= 0 ||
870 ap_dev->queue_count >= ap_dev->queue_depth)
871 return 0;
872 /* Start the next request on the queue. */
873 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
874 status = __ap_send(ap_dev->qid, ap_msg->psmid,
875 ap_msg->message, ap_msg->length);
876 switch (status.response_code) {
877 case AP_RESPONSE_NORMAL:
878 atomic_inc(&ap_poll_requests);
879 ap_dev->queue_count++;
880 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
881 ap_dev->requestq_count--;
882 ap_dev->pendingq_count++;
883 if (ap_dev->queue_count < ap_dev->queue_depth &&
884 ap_dev->requestq_count > 0)
885 *flags |= 1;
886 *flags |= 2;
887 break;
888 case AP_RESPONSE_Q_FULL:
889 *flags |= 2;
890 break;
891 case AP_RESPONSE_MESSAGE_TOO_BIG:
892 return -EINVAL;
893 default:
894 return -ENODEV;
895 }
896 return 0;
897}
898
899/**
900 * Poll AP device for pending replies and send new messages. If either
901 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
902 * @ap_dev: pointer to the bus device
903 * @flags: pointer to control flags, bit 2^0 is set if another poll is
904 * required, bit 2^1 is set if the poll timer needs to get armed
905 * Returns 0.
906 */
907static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
908{
909 int rc;
910
911 rc = ap_poll_read(ap_dev, flags);
912 if (rc)
913 return rc;
914 return ap_poll_write(ap_dev, flags);
915}
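
A minimal sketch (not part of the patch) of how a caller is expected to act on the two control-flag bits that ap_poll_read()/ap_poll_write() accumulate through ap_poll_queue(): keep polling while bit 2^0 is set and arm the poll timer when bit 2^1 is set. The helper name example_poll_device is an assumption for illustration; it takes the per-device lock with spin_lock_bh() since it assumes process context, and ap_poll_all() further below does the same walk across the whole bus from tasklet context.

static void example_poll_device(struct ap_device *ap_dev)
{
	unsigned long flags;

	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		if (ap_poll_queue(ap_dev, &flags)) {
			spin_unlock_bh(&ap_dev->lock);
			return;			/* device is gone */
		}
		spin_unlock_bh(&ap_dev->lock);
	} while (flags & 1);			/* bit 2^0: poll again */
	if (flags & 2)				/* bit 2^1: arm the poll timer */
		ap_schedule_poll_timer();
}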
916
917/**
918 * Queue a message to a device.
919 * @ap_dev: pointer to the AP device
920 * @ap_msg: the message to be queued
921 */
922static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
923{
924 struct ap_queue_status status;
925
926 if (list_empty(&ap_dev->requestq) &&
927 ap_dev->queue_count < ap_dev->queue_depth) {
928 status = __ap_send(ap_dev->qid, ap_msg->psmid,
929 ap_msg->message, ap_msg->length);
930 switch (status.response_code) {
931 case AP_RESPONSE_NORMAL:
932 list_add_tail(&ap_msg->list, &ap_dev->pendingq);
933 atomic_inc(&ap_poll_requests);
934 ap_dev->pendingq_count++;
935 ap_dev->queue_count++;
936 ap_dev->total_request_count++;
937 break;
938 case AP_RESPONSE_Q_FULL:
939 list_add_tail(&ap_msg->list, &ap_dev->requestq);
940 ap_dev->requestq_count++;
941 ap_dev->total_request_count++;
942 return -EBUSY;
943 case AP_RESPONSE_MESSAGE_TOO_BIG:
944 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
945 return -EINVAL;
946 default: /* Device is gone. */
947 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
948 return -ENODEV;
949 }
950 } else {
951 list_add_tail(&ap_msg->list, &ap_dev->requestq);
952 ap_dev->requestq_count++;
953 ap_dev->total_request_count++;
954 return -EBUSY;
955 }
956 ap_schedule_poll_timer();
957 return 0;
958}
959
960void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
961{
962 unsigned long flags;
963 int rc;
964
965 spin_lock_bh(&ap_dev->lock);
966 if (!ap_dev->unregistered) {
967 /* Make room on the queue by polling for finished requests. */
968 rc = ap_poll_queue(ap_dev, &flags);
969 if (!rc)
970 rc = __ap_queue_message(ap_dev, ap_msg);
971 if (!rc)
972 wake_up(&ap_poll_wait);
973 } else {
974 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
975 rc = 0;
976 }
977 spin_unlock_bh(&ap_dev->lock);
978 if (rc == -ENODEV)
979 device_unregister(&ap_dev->device);
980}
981EXPORT_SYMBOL(ap_queue_message);
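
A sketch, under stated assumptions, of how a card driver might hand a request to ap_queue_message(): the driver embeds a struct ap_message in its own request structure, picks a unique psmid, and waits for its ->receive callback (which runs in tasklet context) to signal a completion. The example_* names and the completion-based wait are illustrative assumptions, not part of this patch; <linux/completion.h> is needed for them.

struct example_request {
	struct ap_message msg;
	struct completion work;		/* completed by ->receive */
};

static int example_submit(struct ap_device *ap_dev,
			  struct example_request *req,
			  void *buf, size_t len)
{
	req->msg.message = buf;
	req->msg.length = len;
	/* Replies are matched against the psmid, so it must be unique. */
	req->msg.psmid = (unsigned long long) (unsigned long) req;
	req->msg.private = req;
	init_completion(&req->work);
	ap_queue_message(ap_dev, &req->msg);
	/* The driver's ->receive callback completes req->work once the
	   reply (or an ERR_PTR value on failure) has been delivered. */
	wait_for_completion(&req->work);
	return 0;
}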
982
983/**
984 * Cancel a crypto request. This is done by removing the request
985 * from the device pendingq or requestq. Note that the
986 * request stays on the AP queue. When it finishes, the reply
987 * message will be discarded because its psmid can't be found.
988 * @ap_dev: AP device that has the message queued
989 * @ap_msg: the message that is to be removed
990 */
991void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
992{
993 struct ap_message *tmp;
994
995 spin_lock_bh(&ap_dev->lock);
996 if (!list_empty(&ap_msg->list)) {
997 list_for_each_entry(tmp, &ap_dev->pendingq, list)
998 if (tmp->psmid == ap_msg->psmid) {
999 ap_dev->pendingq_count--;
1000 goto found;
1001 }
1002 ap_dev->requestq_count--;
1003 found:
1004 list_del_init(&ap_msg->list);
1005 }
1006 spin_unlock_bh(&ap_dev->lock);
1007}
1008EXPORT_SYMBOL(ap_cancel_message);
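
Building on the hypothetical example_request above (with req->msg and req->work prepared exactly as in example_submit() but not yet queued), a bounded variant shows the intended use of ap_cancel_message(): if the reply never arrives, the message is taken off the device's pendingq/requestq and any late hardware reply is silently dropped because its psmid no longer matches a queued message. The 30-second timeout is an arbitrary example value.

static int example_submit_bounded(struct ap_device *ap_dev,
				  struct example_request *req)
{
	ap_queue_message(ap_dev, &req->msg);
	if (!wait_for_completion_timeout(&req->work, 30 * HZ)) {
		ap_cancel_message(ap_dev, &req->msg);
		return -ETIMEDOUT;
	}
	return 0;
}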
1009
1010/**
1011 * AP receive polling for finished AP requests
1012 */
1013static void ap_poll_timeout(unsigned long unused)
1014{
1015 tasklet_schedule(&ap_tasklet);
1016}
1017
1018/**
1019 * Poll all AP devices on the bus in a round robin fashion. Continue
1020 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1021 * of the control flags has been set arm the poll timer.
1022 */
1023static int __ap_poll_all(struct device *dev, void *data)
1024{
1025 struct ap_device *ap_dev = to_ap_dev(dev);
1026 int rc;
1027
1028 spin_lock(&ap_dev->lock);
1029 if (!ap_dev->unregistered) {
1030 rc = ap_poll_queue(to_ap_dev(dev), (unsigned long *) data);
1031 } else
1032 rc = 0;
1033 spin_unlock(&ap_dev->lock);
1034 if (rc)
1035 device_unregister(&ap_dev->device);
1036 return 0;
1037}
1038
1039static void ap_poll_all(unsigned long dummy)
1040{
1041 unsigned long flags;
1042
1043 do {
1044 flags = 0;
1045 bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
1046 } while (flags & 1);
1047 if (flags & 2)
1048 ap_schedule_poll_timer();
1049}
1050
1051/**
1052 * AP bus poll thread. The purpose of this thread is to poll for
1053 * finished requests in a loop if there is a "free" cpu - that is
1054 * a cpu that doesn't have anything better to do. The polling stops
1055 * as soon as there is another task or if all messages have been
1056 * delivered.
1057 */
1058static int ap_poll_thread(void *data)
1059{
1060 DECLARE_WAITQUEUE(wait, current);
1061 unsigned long flags;
1062 int requests;
1063
1064 set_user_nice(current, -20);
1065 while (1) {
1066 if (need_resched()) {
1067 schedule();
1068 continue;
1069 }
1070 add_wait_queue(&ap_poll_wait, &wait);
1071 set_current_state(TASK_INTERRUPTIBLE);
1072 if (kthread_should_stop())
1073 break;
1074 requests = atomic_read(&ap_poll_requests);
1075 if (requests <= 0)
1076 schedule();
1077 set_current_state(TASK_RUNNING);
1078 remove_wait_queue(&ap_poll_wait, &wait);
1079
1080 local_bh_disable();
1081 flags = 0;
1082 bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
1083 local_bh_enable();
1084 }
1085 set_current_state(TASK_RUNNING);
1086 remove_wait_queue(&ap_poll_wait, &wait);
1087 return 0;
1088}
1089
1090static int ap_poll_thread_start(void)
1091{
1092 int rc;
1093
1094 mutex_lock(&ap_poll_thread_mutex);
1095 if (!ap_poll_kthread) {
1096 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1097 rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1098 if (rc)
1099 ap_poll_kthread = NULL;
1100 }
1101 else
1102 rc = 0;
1103 mutex_unlock(&ap_poll_thread_mutex);
1104 return rc;
1105}
1106
1107static void ap_poll_thread_stop(void)
1108{
1109 mutex_lock(&ap_poll_thread_mutex);
1110 if (ap_poll_kthread) {
1111 kthread_stop(ap_poll_kthread);
1112 ap_poll_kthread = NULL;
1113 }
1114 mutex_unlock(&ap_poll_thread_mutex);
1115}
1116
1117/**
1118 * The module initialization code.
1119 */
1120int __init ap_module_init(void)
1121{
1122 int rc, i;
1123
1124 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1125 printk(KERN_WARNING "Invalid param: domain = %d. "
1126 "Not loading.\n", ap_domain_index);
1127 return -EINVAL;
1128 }
1129 if (ap_instructions_available() != 0) {
1130 printk(KERN_WARNING "AP instructions not installed.\n");
1131 return -ENODEV;
1132 }
1133
1134 /* Create /sys/bus/ap. */
1135 rc = bus_register(&ap_bus_type);
1136 if (rc)
1137 goto out;
1138 for (i = 0; ap_bus_attrs[i]; i++) {
1139 rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1140 if (rc)
1141 goto out_bus;
1142 }
1143
1144 /* Create /sys/devices/ap. */
1145 ap_root_device = s390_root_dev_register("ap");
1146 rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
1147 if (rc)
1148 goto out_bus;
1149
1150 ap_work_queue = create_singlethread_workqueue("kapwork");
1151 if (!ap_work_queue) {
1152 rc = -ENOMEM;
1153 goto out_root;
1154 }
1155
1156 if (ap_select_domain() == 0)
1157 ap_scan_bus(NULL);
1158
1159 /* Setup the ap bus rescan timer. */
1160 init_timer(&ap_config_timer);
1161 ap_config_timer.function = ap_config_timeout;
1162 ap_config_timer.data = 0;
1163 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1164 add_timer(&ap_config_timer);
1165
1166 /* Start the low priority AP bus poll thread. */
1167 if (ap_thread_flag) {
1168 rc = ap_poll_thread_start();
1169 if (rc)
1170 goto out_work;
1171 }
1172
1173 return 0;
1174
1175out_work:
1176 del_timer_sync(&ap_config_timer);
1177 del_timer_sync(&ap_poll_timer);
1178 destroy_workqueue(ap_work_queue);
1179out_root:
1180 s390_root_dev_unregister(ap_root_device);
1181out_bus:
1182 while (i--)
1183 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1184 bus_unregister(&ap_bus_type);
1185out:
1186 return rc;
1187}
1188
1189static int __ap_match_all(struct device *dev, void *data)
1190{
1191 return 1;
1192}
1193
1194/**
1195 * The module termination code
1196 */
1197void ap_module_exit(void)
1198{
1199 int i;
1200 struct device *dev;
1201
1202 ap_poll_thread_stop();
1203 del_timer_sync(&ap_config_timer);
1204 del_timer_sync(&ap_poll_timer);
1205 destroy_workqueue(ap_work_queue);
1206 s390_root_dev_unregister(ap_root_device);
1207 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
1208 __ap_match_all)))
1209 {
1210 device_unregister(dev);
1211 put_device(dev);
1212 }
1213 for (i = 0; ap_bus_attrs[i]; i++)
1214 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1215 bus_unregister(&ap_bus_type);
1216}
1217
1218#ifndef CONFIG_ZCRYPT_MONOLITHIC
1219module_init(ap_module_init);
1220module_exit(ap_module_exit);
1221#endif
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
new file mode 100644
index 000000000000..83b69c01cd6e
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.h
@@ -0,0 +1,158 @@
1/*
2 * linux/drivers/s390/crypto/ap_bus.h
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus header file.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#ifndef _AP_BUS_H_
27#define _AP_BUS_H_
28
29#include <linux/device.h>
30#include <linux/mod_devicetable.h>
31#include <linux/types.h>
32
33#define AP_DEVICES 64 /* Number of AP devices. */
34#define AP_DOMAINS 16 /* Number of AP domains. */
35#define AP_MAX_RESET 90 /* Maximum number of resets. */
36#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
37#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
38
39extern int ap_domain_index;
40
41/**
42 * The ap_qid_t identifier of an ap queue. It contains a
43 * 6 bit device index and a 4 bit queue index (domain).
44 */
45typedef unsigned int ap_qid_t;
46
47#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15))
48#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
49#define AP_QID_QUEUE(_qid) ((_qid) & 15)
50
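
A small round-trip illustration (an assumption for this header, not part of the patch) of how the macros above compose and split a queue identifier: device index 0x3f in domain 6 yields qid 0x3f06, and the accessors recover both parts.

static inline int example_qid_roundtrip(void)
{
	ap_qid_t qid = AP_MKQID(0x3f, 6);	/* == 0x3f06 */

	return AP_QID_DEVICE(qid) == 0x3f && AP_QID_QUEUE(qid) == 6;
}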
51/**
52 * The ap queue status word is returned by all three AP functions
53 * (PQAP, NQAP and DQAP). There's a set of flags in the first
54 * byte, followed by a 1 byte response code.
55 */
56struct ap_queue_status {
57 unsigned int queue_empty : 1;
58 unsigned int replies_waiting : 1;
59 unsigned int queue_full : 1;
60 unsigned int pad1 : 5;
61 unsigned int response_code : 8;
62 unsigned int pad2 : 16;
63};
64
65#define AP_RESPONSE_NORMAL 0x00
66#define AP_RESPONSE_Q_NOT_AVAIL 0x01
67#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
68#define AP_RESPONSE_DECONFIGURED 0x03
69#define AP_RESPONSE_CHECKSTOPPED 0x04
70#define AP_RESPONSE_BUSY 0x05
71#define AP_RESPONSE_Q_FULL 0x10
72#define AP_RESPONSE_NO_PENDING_REPLY 0x10
73#define AP_RESPONSE_INDEX_TOO_BIG 0x11
74#define AP_RESPONSE_NO_FIRST_PART 0x13
75#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
76
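
For illustration, a helper (an assumption, not part of the patch) that maps a returned status word onto errno-style results the way the send path in ap_bus.c does: a full queue means try again later, an oversized message is a caller error, and anything else is treated as a vanished device.

static inline int example_status_to_errno(struct ap_queue_status status)
{
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
		return -EBUSY;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return -EINVAL;
	default:
		return -ENODEV;
	}
}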
77/**
78 * Known device types
79 */
80#define AP_DEVICE_TYPE_PCICC 3
81#define AP_DEVICE_TYPE_PCICA 4
82#define AP_DEVICE_TYPE_PCIXCC 5
83#define AP_DEVICE_TYPE_CEX2A 6
84#define AP_DEVICE_TYPE_CEX2C 7
85
86struct ap_device;
87struct ap_message;
88
89struct ap_driver {
90 struct device_driver driver;
91 struct ap_device_id *ids;
92
93 int (*probe)(struct ap_device *);
94 void (*remove)(struct ap_device *);
95 /* receive is called from tasklet context */
96 void (*receive)(struct ap_device *, struct ap_message *,
97 struct ap_message *);
98};
99
100#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
101
102int ap_driver_register(struct ap_driver *, struct module *, char *);
103void ap_driver_unregister(struct ap_driver *);
104
105struct ap_device {
106 struct device device;
107 struct ap_driver *drv; /* Pointer to AP device driver. */
108 spinlock_t lock; /* Per device lock. */
109
110 ap_qid_t qid; /* AP queue id. */
111 int queue_depth; /* AP queue depth.*/
112 int device_type; /* AP device type. */
113 int unregistered; /* marks AP device as unregistered */
114
115 int queue_count; /* # messages currently on AP queue. */
116
117 struct list_head pendingq; /* List of messages sent to AP queue. */
118 int pendingq_count; /* # requests on pendingq list. */
119 struct list_head requestq; /* List of messages yet to be sent. */
120 int requestq_count; /* # requests on requestq list. */
121 int total_request_count; /* # requests ever for this AP device. */
122
123 struct ap_message *reply; /* Per device reply message. */
124
125 void *private; /* ap driver private pointer. */
126};
127
128#define to_ap_dev(x) container_of((x), struct ap_device, device)
129
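
Illustrative only: bus callbacks receive a bare struct device and convert it back with to_ap_dev(); the two per-device counters then give the number of requests that have been queued to the device but not yet answered.

static inline int example_ap_backlog(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	/* requestq: not yet sent; pendingq: sent, reply outstanding. */
	return ap_dev->requestq_count + ap_dev->pendingq_count;
}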
130struct ap_message {
131 struct list_head list; /* Request queueing. */
132 unsigned long long psmid; /* Message id. */
133 void *message; /* Pointer to message buffer. */
134 size_t length; /* Message length. */
135
136 void *private; /* ap driver private pointer. */
137};
138
139#define AP_DEVICE(dt) \
140 .dev_type=(dt), \
141 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
142
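
A skeletal card driver, sketched here only to show how the pieces of this header fit together; every example_* name is hypothetical and the hooks do nothing. A real driver would fill in probe/remove/receive and register itself from its module init code with ap_driver_register(&example_driver, THIS_MODULE, "example").

static struct ap_device_id example_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
	{ /* end of list */ },
};

static int example_probe(struct ap_device *ap_dev)
{
	/* Allocate ap_dev->private and set up ap_dev->reply here. */
	return 0;
}

static void example_remove(struct ap_device *ap_dev)
{
	/* Undo whatever probe set up. */
}

static void example_receive(struct ap_device *ap_dev,
			    struct ap_message *msg,
			    struct ap_message *reply)
{
	/* Runs in tasklet context; reply may be an ERR_PTR() value. */
}

static struct ap_driver example_driver = {
	.ids = example_ids,
	.probe = example_probe,
	.remove = example_remove,
	.receive = example_receive,
};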
143/**
144 * Note: don't use ap_send/ap_recv after using ap_queue_message
145 * for the first time. Otherwise the ap message queue will get
146 * confused.
147 */
148int ap_send(ap_qid_t, unsigned long long, void *, size_t);
149int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
150
151void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
152void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
153void ap_flush_queue(struct ap_device *ap_dev);
154
155int ap_module_init(void);
156void ap_module_exit(void);
157
158#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
deleted file mode 100644
index dbbcda3c846a..000000000000
--- a/drivers/s390/crypto/z90common.h
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90common.h
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _Z90COMMON_H_
28#define _Z90COMMON_H_
29
30
31#define RESPBUFFSIZE 256
32#define PCI_FUNC_KEY_DECRYPT 0x5044
33#define PCI_FUNC_KEY_ENCRYPT 0x504B
34extern int ext_bitlens;
35
36enum devstat {
37 DEV_GONE,
38 DEV_ONLINE,
39 DEV_QUEUE_FULL,
40 DEV_EMPTY,
41 DEV_NO_WORK,
42 DEV_BAD_MESSAGE,
43 DEV_TSQ_EXCEPTION,
44 DEV_RSQ_EXCEPTION,
45 DEV_SEN_EXCEPTION,
46 DEV_REC_EXCEPTION
47};
48
49enum hdstat {
50 HD_NOT_THERE,
51 HD_BUSY,
52 HD_DECONFIGURED,
53 HD_CHECKSTOPPED,
54 HD_ONLINE,
55 HD_TSQ_EXCEPTION
56};
57
58#define Z90C_NO_DEVICES 1
59#define Z90C_AMBIGUOUS_DOMAIN 2
60#define Z90C_INCORRECT_DOMAIN 3
61#define ENOTINIT 4
62
63#define SEN_BUSY 7
64#define SEN_USER_ERROR 8
65#define SEN_QUEUE_FULL 11
66#define SEN_NOT_AVAIL 16
67#define SEN_PAD_ERROR 17
68#define SEN_RETRY 18
69#define SEN_RELEASED 24
70
71#define REC_EMPTY 4
72#define REC_BUSY 6
73#define REC_OPERAND_INV 8
74#define REC_OPERAND_SIZE 9
75#define REC_EVEN_MOD 10
76#define REC_NO_WORK 11
77#define REC_HARDWAR_ERR 12
78#define REC_NO_RESPONSE 13
79#define REC_RETRY_DEV 14
80#define REC_USER_GONE 15
81#define REC_BAD_MESSAGE 16
82#define REC_INVALID_PAD 17
83#define REC_USE_PCICA 18
84
85#define WRONG_DEVICE_TYPE 20
86
87#define REC_FATAL_ERROR 32
88#define SEN_FATAL_ERROR 33
89#define TSQ_FATAL_ERROR 34
90#define RSQ_FATAL_ERROR 35
91
92#define Z90CRYPT_NUM_TYPES 6
93#define PCICA 0
94#define PCICC 1
95#define PCIXCC_MCL2 2
96#define PCIXCC_MCL3 3
97#define CEX2C 4
98#define CEX2A 5
99#define NILDEV -1
100#define ANYDEV -1
101#define PCIXCC_UNK -2
102
103enum hdevice_type {
104 PCICC_HW = 3,
105 PCICA_HW = 4,
106 PCIXCC_HW = 5,
107 CEX2A_HW = 6,
108 CEX2C_HW = 7
109};
110
111struct CPRBX {
112 unsigned short cprb_len;
113 unsigned char cprb_ver_id;
114 unsigned char pad_000[3];
115 unsigned char func_id[2];
116 unsigned char cprb_flags[4];
117 unsigned int req_parml;
118 unsigned int req_datal;
119 unsigned int rpl_msgbl;
120 unsigned int rpld_parml;
121 unsigned int rpl_datal;
122 unsigned int rpld_datal;
123 unsigned int req_extbl;
124 unsigned char pad_001[4];
125 unsigned int rpld_extbl;
126 unsigned char req_parmb[16];
127 unsigned char req_datab[16];
128 unsigned char rpl_parmb[16];
129 unsigned char rpl_datab[16];
130 unsigned char req_extb[16];
131 unsigned char rpl_extb[16];
132 unsigned short ccp_rtcode;
133 unsigned short ccp_rscode;
134 unsigned int mac_data_len;
135 unsigned char logon_id[8];
136 unsigned char mac_value[8];
137 unsigned char mac_content_flgs;
138 unsigned char pad_002;
139 unsigned short domain;
140 unsigned char pad_003[12];
141 unsigned char pad_004[36];
142};
143
144#ifndef DEV_NAME
145#define DEV_NAME "z90crypt"
146#endif
147#define PRINTK(fmt, args...) \
148 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
149#define PRINTKN(fmt, args...) \
150 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
151#define PRINTKW(fmt, args...) \
152 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
153#define PRINTKC(fmt, args...) \
154 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
155
156#ifdef Z90CRYPT_DEBUG
157#define PDEBUG(fmt, args...) \
158 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
159#else
160#define PDEBUG(fmt, args...) do {} while (0)
161#endif
162
163#define UMIN(a,b) ((a) < (b) ? (a) : (b))
164#define IS_EVEN(x) ((x) == (2 * ((x) / 2)))
165
166#endif
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
deleted file mode 100644
index 0ca1d126ccb6..000000000000
--- a/drivers/s390/crypto/z90crypt.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90crypt.h
3 *
4 * z90crypt 1.3.3 (kernel-private header)
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _Z90CRYPT_H_
28#define _Z90CRYPT_H_
29
30#include <asm/z90crypt.h>
31
32/**
33 * local errno definitions
34 */
35#define ENOBUFF 129 // filp->private_data->...>work_elem_p->buffer is NULL
36#define EWORKPEND 130 // user issues ioctl while another pending
37#define ERELEASED 131 // user released while ioctl pending
38#define EQUIESCE 132 // z90crypt quiescing (no more work allowed)
39#define ETIMEOUT 133 // request timed out
40#define EUNKNOWN 134 // some unrecognized error occurred (retry may succeed)
41#define EGETBUFF 135 // Error getting buffer or hardware lacks capability
42 // (retry in software)
43
44/**
45 * DEPRECATED STRUCTURES
46 */
47
48/**
49 * This structure is DEPRECATED and the corresponding ioctl() has been
50 * replaced with individual ioctl()s for each piece of data!
51 * This structure will NOT survive past version 1.3.1, so switch to the
52 * new ioctl()s.
53 */
54#define MASK_LENGTH 64 // mask length
55struct ica_z90_status {
56 int totalcount;
57 int leedslitecount; // PCICA
58 int leeds2count; // PCICC
59 // int PCIXCCCount; is not in struct for backward compatibility
60 int requestqWaitCount;
61 int pendingqWaitCount;
62 int totalOpenCount;
63 int cryptoDomain;
64 // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
65 // 5=CEX2C
66 unsigned char status[MASK_LENGTH];
67 // qdepth: # work elements waiting for each device
68 unsigned char qdepth[MASK_LENGTH];
69};
70
71#endif /* _Z90CRYPT_H_ */
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
deleted file mode 100644
index be60795f4a74..000000000000
--- a/drivers/s390/crypto/z90hardware.c
+++ /dev/null
@@ -1,2531 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90hardware.c
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h>
28#include <linux/compiler.h>
29#include <linux/delay.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include "z90crypt.h"
33#include "z90common.h"
34
35struct cca_token_hdr {
36 unsigned char token_identifier;
37 unsigned char version;
38 unsigned short token_length;
39 unsigned char reserved[4];
40};
41
42#define CCA_TKN_HDR_ID_EXT 0x1E
43
44struct cca_private_ext_ME_sec {
45 unsigned char section_identifier;
46 unsigned char version;
47 unsigned short section_length;
48 unsigned char private_key_hash[20];
49 unsigned char reserved1[4];
50 unsigned char key_format;
51 unsigned char reserved2;
52 unsigned char key_name_hash[20];
53 unsigned char key_use_flags[4];
54 unsigned char reserved3[6];
55 unsigned char reserved4[24];
56 unsigned char confounder[24];
57 unsigned char exponent[128];
58 unsigned char modulus[128];
59};
60
61#define CCA_PVT_USAGE_ALL 0x80
62
63struct cca_public_sec {
64 unsigned char section_identifier;
65 unsigned char version;
66 unsigned short section_length;
67 unsigned char reserved[2];
68 unsigned short exponent_len;
69 unsigned short modulus_bit_len;
70 unsigned short modulus_byte_len;
71 unsigned char exponent[3];
72};
73
74struct cca_private_ext_ME {
75 struct cca_token_hdr pvtMEHdr;
76 struct cca_private_ext_ME_sec pvtMESec;
77 struct cca_public_sec pubMESec;
78};
79
80struct cca_public_key {
81 struct cca_token_hdr pubHdr;
82 struct cca_public_sec pubSec;
83};
84
85struct cca_pvt_ext_CRT_sec {
86 unsigned char section_identifier;
87 unsigned char version;
88 unsigned short section_length;
89 unsigned char private_key_hash[20];
90 unsigned char reserved1[4];
91 unsigned char key_format;
92 unsigned char reserved2;
93 unsigned char key_name_hash[20];
94 unsigned char key_use_flags[4];
95 unsigned short p_len;
96 unsigned short q_len;
97 unsigned short dp_len;
98 unsigned short dq_len;
99 unsigned short u_len;
100 unsigned short mod_len;
101 unsigned char reserved3[4];
102 unsigned short pad_len;
103 unsigned char reserved4[52];
104 unsigned char confounder[8];
105};
106
107#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
108#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
109
110struct cca_private_ext_CRT {
111 struct cca_token_hdr pvtCrtHdr;
112 struct cca_pvt_ext_CRT_sec pvtCrtSec;
113 struct cca_public_sec pubCrtSec;
114};
115
116struct ap_status_word {
117 unsigned char q_stat_flags;
118 unsigned char response_code;
119 unsigned char reserved[2];
120};
121
122#define AP_Q_STATUS_EMPTY 0x80
123#define AP_Q_STATUS_REPLIES_WAITING 0x40
124#define AP_Q_STATUS_ARRAY_FULL 0x20
125
126#define AP_RESPONSE_NORMAL 0x00
127#define AP_RESPONSE_Q_NOT_AVAIL 0x01
128#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
129#define AP_RESPONSE_DECONFIGURED 0x03
130#define AP_RESPONSE_CHECKSTOPPED 0x04
131#define AP_RESPONSE_BUSY 0x05
132#define AP_RESPONSE_Q_FULL 0x10
133#define AP_RESPONSE_NO_PENDING_REPLY 0x10
134#define AP_RESPONSE_INDEX_TOO_BIG 0x11
135#define AP_RESPONSE_NO_FIRST_PART 0x13
136#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
137
138#define AP_MAX_CDX_BITL 4
139#define AP_RQID_RESERVED_BITL 4
140#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL)
141
142struct type4_hdr {
143 unsigned char reserved1;
144 unsigned char msg_type_code;
145 unsigned short msg_len;
146 unsigned char request_code;
147 unsigned char msg_fmt;
148 unsigned short reserved2;
149};
150
151#define TYPE4_TYPE_CODE 0x04
152#define TYPE4_REQU_CODE 0x40
153
154#define TYPE4_SME_LEN 0x0188
155#define TYPE4_LME_LEN 0x0308
156#define TYPE4_SCR_LEN 0x01E0
157#define TYPE4_LCR_LEN 0x03A0
158
159#define TYPE4_SME_FMT 0x00
160#define TYPE4_LME_FMT 0x10
161#define TYPE4_SCR_FMT 0x40
162#define TYPE4_LCR_FMT 0x50
163
164struct type4_sme {
165 struct type4_hdr header;
166 unsigned char message[128];
167 unsigned char exponent[128];
168 unsigned char modulus[128];
169};
170
171struct type4_lme {
172 struct type4_hdr header;
173 unsigned char message[256];
174 unsigned char exponent[256];
175 unsigned char modulus[256];
176};
177
178struct type4_scr {
179 struct type4_hdr header;
180 unsigned char message[128];
181 unsigned char dp[72];
182 unsigned char dq[64];
183 unsigned char p[72];
184 unsigned char q[64];
185 unsigned char u[72];
186};
187
188struct type4_lcr {
189 struct type4_hdr header;
190 unsigned char message[256];
191 unsigned char dp[136];
192 unsigned char dq[128];
193 unsigned char p[136];
194 unsigned char q[128];
195 unsigned char u[136];
196};
197
198union type4_msg {
199 struct type4_sme sme;
200 struct type4_lme lme;
201 struct type4_scr scr;
202 struct type4_lcr lcr;
203};
204
205struct type84_hdr {
206 unsigned char reserved1;
207 unsigned char code;
208 unsigned short len;
209 unsigned char reserved2[4];
210};
211
212#define TYPE84_RSP_CODE 0x84
213
214struct type6_hdr {
215 unsigned char reserved1;
216 unsigned char type;
217 unsigned char reserved2[2];
218 unsigned char right[4];
219 unsigned char reserved3[2];
220 unsigned char reserved4[2];
221 unsigned char apfs[4];
222 unsigned int offset1;
223 unsigned int offset2;
224 unsigned int offset3;
225 unsigned int offset4;
226 unsigned char agent_id[16];
227 unsigned char rqid[2];
228 unsigned char reserved5[2];
229 unsigned char function_code[2];
230 unsigned char reserved6[2];
231 unsigned int ToCardLen1;
232 unsigned int ToCardLen2;
233 unsigned int ToCardLen3;
234 unsigned int ToCardLen4;
235 unsigned int FromCardLen1;
236 unsigned int FromCardLen2;
237 unsigned int FromCardLen3;
238 unsigned int FromCardLen4;
239};
240
241struct CPRB {
242 unsigned char cprb_len[2];
243 unsigned char cprb_ver_id;
244 unsigned char pad_000;
245 unsigned char srpi_rtcode[4];
246 unsigned char srpi_verb;
247 unsigned char flags;
248 unsigned char func_id[2];
249 unsigned char checkpoint_flag;
250 unsigned char resv2;
251 unsigned char req_parml[2];
252 unsigned char req_parmp[4];
253 unsigned char req_datal[4];
254 unsigned char req_datap[4];
255 unsigned char rpl_parml[2];
256 unsigned char pad_001[2];
257 unsigned char rpl_parmp[4];
258 unsigned char rpl_datal[4];
259 unsigned char rpl_datap[4];
260 unsigned char ccp_rscode[2];
261 unsigned char ccp_rtcode[2];
262 unsigned char repd_parml[2];
263 unsigned char mac_data_len[2];
264 unsigned char repd_datal[4];
265 unsigned char req_pc[2];
266 unsigned char res_origin[8];
267 unsigned char mac_value[8];
268 unsigned char logon_id[8];
269 unsigned char usage_domain[2];
270 unsigned char resv3[18];
271 unsigned char svr_namel[2];
272 unsigned char svr_name[8];
273};
274
275struct type6_msg {
276 struct type6_hdr header;
277 struct CPRB CPRB;
278};
279
280struct type86_hdr {
281 unsigned char reserved1;
282 unsigned char type;
283 unsigned char format;
284 unsigned char reserved2;
285 unsigned char reply_code;
286 unsigned char reserved3[3];
287};
288
289#define TYPE86_RSP_CODE 0x86
290#define TYPE86_FMT2 0x02
291
292struct type86_fmt2_msg {
293 struct type86_hdr header;
294 unsigned char reserved[4];
295 unsigned char apfs[4];
296 unsigned int count1;
297 unsigned int offset1;
298 unsigned int count2;
299 unsigned int offset2;
300 unsigned int count3;
301 unsigned int offset3;
302 unsigned int count4;
303 unsigned int offset4;
304};
305
306static struct type6_hdr static_type6_hdr = {
307 0x00,
308 0x06,
309 {0x00,0x00},
310 {0x00,0x00,0x00,0x00},
311 {0x00,0x00},
312 {0x00,0x00},
313 {0x00,0x00,0x00,0x00},
314 0x00000058,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
319 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
320 {0x00,0x00},
321 {0x00,0x00},
322 {0x50,0x44},
323 {0x00,0x00},
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000
332};
333
334static struct type6_hdr static_type6_hdrX = {
335 0x00,
336 0x06,
337 {0x00,0x00},
338 {0x00,0x00,0x00,0x00},
339 {0x00,0x00},
340 {0x00,0x00},
341 {0x00,0x00,0x00,0x00},
342 0x00000058,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 {0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
347 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
348 {0x00,0x00},
349 {0x00,0x00},
350 {0x50,0x44},
351 {0x00,0x00},
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000
360};
361
362static struct CPRB static_cprb = {
363 {0x70,0x00},
364 0x41,
365 0x00,
366 {0x00,0x00,0x00,0x00},
367 0x00,
368 0x00,
369 {0x54,0x32},
370 0x01,
371 0x00,
372 {0x00,0x00},
373 {0x00,0x00,0x00,0x00},
374 {0x00,0x00,0x00,0x00},
375 {0x00,0x00,0x00,0x00},
376 {0x00,0x00},
377 {0x00,0x00},
378 {0x00,0x00,0x00,0x00},
379 {0x00,0x00,0x00,0x00},
380 {0x00,0x00,0x00,0x00},
381 {0x00,0x00},
382 {0x00,0x00},
383 {0x00,0x00},
384 {0x00,0x00},
385 {0x00,0x00,0x00,0x00},
386 {0x00,0x00},
387 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
388 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
389 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
390 {0x00,0x00},
391 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
392 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
393 0x00,0x00},
394 {0x08,0x00},
395 {0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20}
396};
397
398struct function_and_rules_block {
399 unsigned char function_code[2];
400 unsigned char ulen[2];
401 unsigned char only_rule[8];
402};
403
404static struct function_and_rules_block static_pkd_function_and_rules = {
405 {0x50,0x44},
406 {0x0A,0x00},
407 {'P','K','C','S','-','1','.','2'}
408};
409
410static struct function_and_rules_block static_pke_function_and_rules = {
411 {0x50,0x4B},
412 {0x0A,0x00},
413 {'P','K','C','S','-','1','.','2'}
414};
415
416struct T6_keyBlock_hdr {
417 unsigned char blen[2];
418 unsigned char ulen[2];
419 unsigned char flags[2];
420};
421
422static struct T6_keyBlock_hdr static_T6_keyBlock_hdr = {
423 {0x89,0x01},
424 {0x87,0x01},
425 {0x00}
426};
427
428static struct CPRBX static_cprbx = {
429 0x00DC,
430 0x02,
431 {0x00,0x00,0x00},
432 {0x54,0x32},
433 {0x00,0x00,0x00,0x00},
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 {0x00,0x00,0x00,0x00},
442 0x00000000,
443 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
444 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
445 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
446 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
447 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
448 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
449 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
450 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
451 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
452 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
453 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
454 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
455 0x0000,
456 0x0000,
457 0x00000000,
458 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
459 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
460 0x00,
461 0x00,
462 0x0000,
463 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
464 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
465 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
466 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
467};
468
469static struct function_and_rules_block static_pkd_function_and_rulesX_MCL2 = {
470 {0x50,0x44},
471 {0x00,0x0A},
472 {'P','K','C','S','-','1','.','2'}
473};
474
475static struct function_and_rules_block static_pke_function_and_rulesX_MCL2 = {
476 {0x50,0x4B},
477 {0x00,0x0A},
478 {'Z','E','R','O','-','P','A','D'}
479};
480
481static struct function_and_rules_block static_pkd_function_and_rulesX = {
482 {0x50,0x44},
483 {0x00,0x0A},
484 {'Z','E','R','O','-','P','A','D'}
485};
486
487static struct function_and_rules_block static_pke_function_and_rulesX = {
488 {0x50,0x4B},
489 {0x00,0x0A},
490 {'M','R','P',' ',' ',' ',' ',' '}
491};
492
493static unsigned char static_PKE_function_code[2] = {0x50, 0x4B};
494
495struct T6_keyBlock_hdrX {
496 unsigned short blen;
497 unsigned short ulen;
498 unsigned char flags[2];
499};
500
501static unsigned char static_pad[256] = {
5020x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
5030x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
5040xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
5050x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
5060x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
5070x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
5080x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
5090xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
5100x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
5110x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
5120x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
5130x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
5140x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
5150x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
5160x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
5170x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
518};
519
520static struct cca_private_ext_ME static_pvt_me_key = {
521 {
522 0x1E,
523 0x00,
524 0x0183,
525 {0x00,0x00,0x00,0x00}
526 },
527
528 {
529 0x02,
530 0x00,
531 0x016C,
532 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
533 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
534 0x00,0x00,0x00,0x00},
535 {0x00,0x00,0x00,0x00},
536 0x00,
537 0x00,
538 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
539 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
540 0x00,0x00,0x00,0x00},
541 {0x80,0x00,0x00,0x00},
542 {0x00,0x00,0x00,0x00,0x00,0x00},
543 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
544 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
545 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
546 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
547 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
548 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
549 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
550 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
551 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
552 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
553 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
554 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
555 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
556 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
557 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
558 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
559 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
560 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
561 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
562 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
563 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
564 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
565 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
566 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
567 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
568 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
569 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
570 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
571 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
572 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
573 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
574 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
575 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
576 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
577 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
578 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
579 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
580 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
581 },
582
583 {
584 0x04,
585 0x00,
586 0x000F,
587 {0x00,0x00},
588 0x0003,
589 0x0000,
590 0x0000,
591 {0x01,0x00,0x01}
592 }
593};
594
595static struct cca_public_key static_public_key = {
596 {
597 0x1E,
598 0x00,
599 0x0000,
600 {0x00,0x00,0x00,0x00}
601 },
602
603 {
604 0x04,
605 0x00,
606 0x0000,
607 {0x00,0x00},
608 0x0000,
609 0x0000,
610 0x0000,
611 {0x01,0x00,0x01}
612 }
613};
614
615#define FIXED_TYPE6_ME_LEN 0x0000025F
616
617#define FIXED_TYPE6_ME_EN_LEN 0x000000F0
618
619#define FIXED_TYPE6_ME_LENX 0x000002CB
620
621#define FIXED_TYPE6_ME_EN_LENX 0x0000015C
622
623static struct cca_public_sec static_cca_pub_sec = {
624 0x04,
625 0x00,
626 0x000f,
627 {0x00,0x00},
628 0x0003,
629 0x0000,
630 0x0000,
631 {0x01,0x00,0x01}
632};
633
634#define FIXED_TYPE6_CR_LEN 0x00000177
635
636#define FIXED_TYPE6_CR_LENX 0x000001E3
637
638#define MAX_RESPONSE_SIZE 0x00000710
639
640#define MAX_RESPONSEX_SIZE 0x0000077C
641
642#define RESPONSE_CPRB_SIZE 0x000006B8
643#define RESPONSE_CPRBX_SIZE 0x00000724
644
645struct type50_hdr {
646 u8 reserved1;
647 u8 msg_type_code;
648 u16 msg_len;
649 u8 reserved2;
650 u8 ignored;
651 u16 reserved3;
652};
653
654#define TYPE50_TYPE_CODE 0x50
655
656#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg))
657#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg))
658#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg))
659#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg))
660
661#define TYPE50_MEB1_FMT 0x0001
662#define TYPE50_MEB2_FMT 0x0002
663#define TYPE50_CRB1_FMT 0x0011
664#define TYPE50_CRB2_FMT 0x0012
665
666struct type50_meb1_msg {
667 struct type50_hdr header;
668 u16 keyblock_type;
669 u8 reserved[6];
670 u8 exponent[128];
671 u8 modulus[128];
672 u8 message[128];
673};
674
675struct type50_meb2_msg {
676 struct type50_hdr header;
677 u16 keyblock_type;
678 u8 reserved[6];
679 u8 exponent[256];
680 u8 modulus[256];
681 u8 message[256];
682};
683
684struct type50_crb1_msg {
685 struct type50_hdr header;
686 u16 keyblock_type;
687 u8 reserved[6];
688 u8 p[64];
689 u8 q[64];
690 u8 dp[64];
691 u8 dq[64];
692 u8 u[64];
693 u8 message[128];
694};
695
696struct type50_crb2_msg {
697 struct type50_hdr header;
698 u16 keyblock_type;
699 u8 reserved[6];
700 u8 p[128];
701 u8 q[128];
702 u8 dp[128];
703 u8 dq[128];
704 u8 u[128];
705 u8 message[256];
706};
707
708union type50_msg {
709 struct type50_meb1_msg meb1;
710 struct type50_meb2_msg meb2;
711 struct type50_crb1_msg crb1;
712 struct type50_crb2_msg crb2;
713};
714
715struct type80_hdr {
716 u8 reserved1;
717 u8 type;
718 u16 len;
719 u8 code;
720 u8 reserved2[3];
721 u8 reserved3[8];
722};
723
724#define TYPE80_RSP_CODE 0x80
725
726struct error_hdr {
727 unsigned char reserved1;
728 unsigned char type;
729 unsigned char reserved2[2];
730 unsigned char reply_code;
731 unsigned char reserved3[3];
732};
733
734#define TYPE82_RSP_CODE 0x82
735#define TYPE88_RSP_CODE 0x88
736
737#define REP82_ERROR_MACHINE_FAILURE 0x10
738#define REP82_ERROR_PREEMPT_FAILURE 0x12
739#define REP82_ERROR_CHECKPT_FAILURE 0x14
740#define REP82_ERROR_MESSAGE_TYPE 0x20
741#define REP82_ERROR_INVALID_COMM_CD 0x21
742#define REP82_ERROR_INVALID_MSG_LEN 0x23
743#define REP82_ERROR_RESERVD_FIELD 0x24
744#define REP82_ERROR_FORMAT_FIELD 0x29
745#define REP82_ERROR_INVALID_COMMAND 0x30
746#define REP82_ERROR_MALFORMED_MSG 0x40
747#define REP82_ERROR_RESERVED_FIELDO 0x50
748#define REP82_ERROR_WORD_ALIGNMENT 0x60
749#define REP82_ERROR_MESSAGE_LENGTH 0x80
750#define REP82_ERROR_OPERAND_INVALID 0x82
751#define REP82_ERROR_OPERAND_SIZE 0x84
752#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
753#define REP82_ERROR_RESERVED_FIELD 0x88
754#define REP82_ERROR_TRANSPORT_FAIL 0x90
755#define REP82_ERROR_PACKET_TRUNCATED 0xA0
756#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
757
758#define REP88_ERROR_MODULE_FAILURE 0x10
759#define REP88_ERROR_MODULE_TIMEOUT 0x11
760#define REP88_ERROR_MODULE_NOTINIT 0x13
761#define REP88_ERROR_MODULE_NOTAVAIL 0x14
762#define REP88_ERROR_MODULE_DISABLED 0x15
763#define REP88_ERROR_MODULE_IN_DIAGN 0x17
764#define REP88_ERROR_FASTPATH_DISABLD 0x19
765#define REP88_ERROR_MESSAGE_TYPE 0x20
766#define REP88_ERROR_MESSAGE_MALFORMD 0x22
767#define REP88_ERROR_MESSAGE_LENGTH 0x23
768#define REP88_ERROR_RESERVED_FIELD 0x24
769#define REP88_ERROR_KEY_TYPE 0x34
770#define REP88_ERROR_INVALID_KEY 0x82
771#define REP88_ERROR_OPERAND 0x84
772#define REP88_ERROR_OPERAND_EVEN_MOD 0x85
773
774#define CALLER_HEADER 12
775
776static inline int
777testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
778{
779 int ccode;
780
781 asm volatile
782#ifdef CONFIG_64BIT
783 (" llgfr 0,%4 \n"
784 " slgr 1,1 \n"
785 " lgr 2,1 \n"
786 "0: .long 0xb2af0000 \n"
787 "1: ipm %0 \n"
788 " srl %0,28 \n"
789 " iihh %0,0 \n"
790 " iihl %0,0 \n"
791 " lgr %1,1 \n"
792 " lgr %3,2 \n"
793 " srl %3,24 \n"
794 " sll 2,24 \n"
795 " srl 2,24 \n"
796 " lgr %2,2 \n"
797 "2: \n"
798 ".section .fixup,\"ax\" \n"
799 "3: \n"
800 " lhi %0,%h5 \n"
801 " jg 2b \n"
802 ".previous \n"
803 ".section __ex_table,\"a\" \n"
804 " .align 8 \n"
805 " .quad 0b,3b \n"
806 " .quad 1b,3b \n"
807 ".previous"
808 :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
809 :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
810 :"cc","0","1","2","memory");
811#else
812 (" lr 0,%4 \n"
813 " slr 1,1 \n"
814 " lr 2,1 \n"
815 "0: .long 0xb2af0000 \n"
816 "1: ipm %0 \n"
817 " srl %0,28 \n"
818 " lr %1,1 \n"
819 " lr %3,2 \n"
820 " srl %3,24 \n"
821 " sll 2,24 \n"
822 " srl 2,24 \n"
823 " lr %2,2 \n"
824 "2: \n"
825 ".section .fixup,\"ax\" \n"
826 "3: \n"
827 " lhi %0,%h5 \n"
828 " bras 1,4f \n"
829 " .long 2b \n"
830 "4: \n"
831 " l 1,0(1) \n"
832 " br 1 \n"
833 ".previous \n"
834 ".section __ex_table,\"a\" \n"
835 " .align 4 \n"
836 " .long 0b,3b \n"
837 " .long 1b,3b \n"
838 ".previous"
839 :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
840 :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
841 :"cc","0","1","2","memory");
842#endif
843 return ccode;
844}
845
846static inline int
847resetq(int q_nr, struct ap_status_word *stat_p)
848{
849 int ccode;
850
851 asm volatile
852#ifdef CONFIG_64BIT
853 (" llgfr 0,%2 \n"
854 " lghi 1,1 \n"
855 " sll 1,24 \n"
856 " or 0,1 \n"
857 " slgr 1,1 \n"
858 " lgr 2,1 \n"
859 "0: .long 0xb2af0000 \n"
860 "1: ipm %0 \n"
861 " srl %0,28 \n"
862 " iihh %0,0 \n"
863 " iihl %0,0 \n"
864 " lgr %1,1 \n"
865 "2: \n"
866 ".section .fixup,\"ax\" \n"
867 "3: \n"
868 " lhi %0,%h3 \n"
869 " jg 2b \n"
870 ".previous \n"
871 ".section __ex_table,\"a\" \n"
872 " .align 8 \n"
873 " .quad 0b,3b \n"
874 " .quad 1b,3b \n"
875 ".previous"
876 :"=d" (ccode),"=d" (*stat_p)
877 :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
878 :"cc","0","1","2","memory");
879#else
880 (" lr 0,%2 \n"
881 " lhi 1,1 \n"
882 " sll 1,24 \n"
883 " or 0,1 \n"
884 " slr 1,1 \n"
885 " lr 2,1 \n"
886 "0: .long 0xb2af0000 \n"
887 "1: ipm %0 \n"
888 " srl %0,28 \n"
889 " lr %1,1 \n"
890 "2: \n"
891 ".section .fixup,\"ax\" \n"
892 "3: \n"
893 " lhi %0,%h3 \n"
894 " bras 1,4f \n"
895 " .long 2b \n"
896 "4: \n"
897 " l 1,0(1) \n"
898 " br 1 \n"
899 ".previous \n"
900 ".section __ex_table,\"a\" \n"
901 " .align 4 \n"
902 " .long 0b,3b \n"
903 " .long 1b,3b \n"
904 ".previous"
905 :"=d" (ccode),"=d" (*stat_p)
906 :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
907 :"cc","0","1","2","memory");
908#endif
909 return ccode;
910}
911
912static inline int
913sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
914{
915 int ccode;
916
917 asm volatile
918#ifdef CONFIG_64BIT
919 (" lgr 6,%3 \n"
920 " llgfr 7,%2 \n"
921 " llgt 0,0(6) \n"
922 " lghi 1,64 \n"
923 " sll 1,24 \n"
924 " or 0,1 \n"
925 " la 6,4(6) \n"
926 " llgt 2,0(6) \n"
927 " llgt 3,4(6) \n"
928 " la 6,8(6) \n"
929 " slr 1,1 \n"
930 "0: .long 0xb2ad0026 \n"
931 "1: brc 2,0b \n"
932 " ipm %0 \n"
933 " srl %0,28 \n"
934 " iihh %0,0 \n"
935 " iihl %0,0 \n"
936 " lgr %1,1 \n"
937 "2: \n"
938 ".section .fixup,\"ax\" \n"
939 "3: \n"
940 " lhi %0,%h4 \n"
941 " jg 2b \n"
942 ".previous \n"
943 ".section __ex_table,\"a\" \n"
944 " .align 8 \n"
945 " .quad 0b,3b \n"
946 " .quad 1b,3b \n"
947 ".previous"
948 :"=d" (ccode),"=d" (*stat)
949 :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
950 :"cc","0","1","2","3","6","7","memory");
951#else
952 (" lr 6,%3 \n"
953 " lr 7,%2 \n"
954 " l 0,0(6) \n"
955 " lhi 1,64 \n"
956 " sll 1,24 \n"
957 " or 0,1 \n"
958 " la 6,4(6) \n"
959 " l 2,0(6) \n"
960 " l 3,4(6) \n"
961 " la 6,8(6) \n"
962 " slr 1,1 \n"
963 "0: .long 0xb2ad0026 \n"
964 "1: brc 2,0b \n"
965 " ipm %0 \n"
966 " srl %0,28 \n"
967 " lr %1,1 \n"
968 "2: \n"
969 ".section .fixup,\"ax\" \n"
970 "3: \n"
971 " lhi %0,%h4 \n"
972 " bras 1,4f \n"
973 " .long 2b \n"
974 "4: \n"
975 " l 1,0(1) \n"
976 " br 1 \n"
977 ".previous \n"
978 ".section __ex_table,\"a\" \n"
979 " .align 4 \n"
980 " .long 0b,3b \n"
981 " .long 1b,3b \n"
982 ".previous"
983 :"=d" (ccode),"=d" (*stat)
984 :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
985 :"cc","0","1","2","3","6","7","memory");
986#endif
987 return ccode;
988}
989
990static inline int
991rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
992 struct ap_status_word *st)
993{
994 int ccode;
995
996 asm volatile
997#ifdef CONFIG_64BIT
998 (" llgfr 0,%2 \n"
999 " lgr 3,%4 \n"
1000 " lgr 6,%3 \n"
1001 " llgfr 7,%5 \n"
1002 " lghi 1,128 \n"
1003 " sll 1,24 \n"
1004 " or 0,1 \n"
1005 " slgr 1,1 \n"
1006 " lgr 2,1 \n"
1007 " lgr 4,1 \n"
1008 " lgr 5,1 \n"
1009 "0: .long 0xb2ae0046 \n"
1010 "1: brc 2,0b \n"
1011 " brc 4,0b \n"
1012 " ipm %0 \n"
1013 " srl %0,28 \n"
1014 " iihh %0,0 \n"
1015 " iihl %0,0 \n"
1016 " lgr %1,1 \n"
1017 " st 4,0(3) \n"
1018 " st 5,4(3) \n"
1019 "2: \n"
1020 ".section .fixup,\"ax\" \n"
1021 "3: \n"
1022 " lhi %0,%h6 \n"
1023 " jg 2b \n"
1024 ".previous \n"
1025 ".section __ex_table,\"a\" \n"
1026 " .align 8 \n"
1027 " .quad 0b,3b \n"
1028 " .quad 1b,3b \n"
1029 ".previous"
1030 :"=d"(ccode),"=d"(*st)
1031 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
1032 :"cc","0","1","2","3","4","5","6","7","memory");
1033#else
1034 (" lr 0,%2 \n"
1035 " lr 3,%4 \n"
1036 " lr 6,%3 \n"
1037 " lr 7,%5 \n"
1038 " lhi 1,128 \n"
1039 " sll 1,24 \n"
1040 " or 0,1 \n"
1041 " slr 1,1 \n"
1042 " lr 2,1 \n"
1043 " lr 4,1 \n"
1044 " lr 5,1 \n"
1045 "0: .long 0xb2ae0046 \n"
1046 "1: brc 2,0b \n"
1047 " brc 4,0b \n"
1048 " ipm %0 \n"
1049 " srl %0,28 \n"
1050 " lr %1,1 \n"
1051 " st 4,0(3) \n"
1052 " st 5,4(3) \n"
1053 "2: \n"
1054 ".section .fixup,\"ax\" \n"
1055 "3: \n"
1056 " lhi %0,%h6 \n"
1057 " bras 1,4f \n"
1058 " .long 2b \n"
1059 "4: \n"
1060 " l 1,0(1) \n"
1061 " br 1 \n"
1062 ".previous \n"
1063 ".section __ex_table,\"a\" \n"
1064 " .align 4 \n"
1065 " .long 0b,3b \n"
1066 " .long 1b,3b \n"
1067 ".previous"
1068 :"=d"(ccode),"=d"(*st)
1069 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
1070 :"cc","0","1","2","3","4","5","6","7","memory");
1071#endif
1072 return ccode;
1073}
1074
1075static inline void
1076itoLe2(int *i_p, unsigned char *lechars)
1077{
1078 *lechars = *((unsigned char *) i_p + sizeof(int) - 1);
1079 *(lechars + 1) = *((unsigned char *) i_p + sizeof(int) - 2);
1080}
1081
1082static inline void
1083le2toI(unsigned char *lechars, int *i_p)
1084{
1085 unsigned char *ic_p;
1086 *i_p = 0;
1087 ic_p = (unsigned char *) i_p;
1088 *(ic_p + 2) = *(lechars + 1);
1089 *(ic_p + 3) = *(lechars);
1090}
1091
1092static inline int
1093is_empty(unsigned char *ptr, int len)
1094{
1095 return !memcmp(ptr, (unsigned char *) &static_pvt_me_key+60, len);
1096}
1097
1098enum hdstat
1099query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1100{
1101 int q_nr, i, t_depth, t_dev_type;
1102 enum devstat ccode;
1103 struct ap_status_word stat_word;
1104 enum hdstat stat;
1105 int break_out;
1106
1107 q_nr = (deviceNr << SKIP_BITL) + cdx;
1108 stat = HD_BUSY;
1109 ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
1110 PDEBUG("ccode %d response_code %02X\n", ccode, stat_word.response_code);
1111 break_out = 0;
1112 for (i = 0; i < resetNr; i++) {
1113 if (ccode > 3) {
1114 PRINTKC("Exception testing device %d\n", i);
1115 return HD_TSQ_EXCEPTION;
1116 }
1117 switch (ccode) {
1118 case 0:
1119 PDEBUG("t_dev_type %d\n", t_dev_type);
1120 break_out = 1;
1121 stat = HD_ONLINE;
1122 *q_depth = t_depth + 1;
1123 switch (t_dev_type) {
1124 case PCICA_HW:
1125 *dev_type = PCICA;
1126 break;
1127 case PCICC_HW:
1128 *dev_type = PCICC;
1129 break;
1130 case PCIXCC_HW:
1131 *dev_type = PCIXCC_UNK;
1132 break;
1133 case CEX2C_HW:
1134 *dev_type = CEX2C;
1135 break;
1136 case CEX2A_HW:
1137 *dev_type = CEX2A;
1138 break;
1139 default:
1140 *dev_type = NILDEV;
1141 break;
1142 }
1143 PDEBUG("available device %d: Q depth = %d, dev "
1144 "type = %d, stat = %02X%02X%02X%02X\n",
1145 deviceNr, *q_depth, *dev_type,
1146 stat_word.q_stat_flags,
1147 stat_word.response_code,
1148 stat_word.reserved[0],
1149 stat_word.reserved[1]);
1150 break;
1151 case 3:
1152 switch (stat_word.response_code) {
1153 case AP_RESPONSE_NORMAL:
1154 stat = HD_ONLINE;
1155 break_out = 1;
1156 *q_depth = t_depth + 1;
1157 *dev_type = t_dev_type;
1158 PDEBUG("cc3, available device "
1159 "%d: Q depth = %d, dev "
1160 "type = %d, stat = "
1161 "%02X%02X%02X%02X\n",
1162 deviceNr, *q_depth,
1163 *dev_type,
1164 stat_word.q_stat_flags,
1165 stat_word.response_code,
1166 stat_word.reserved[0],
1167 stat_word.reserved[1]);
1168 break;
1169 case AP_RESPONSE_Q_NOT_AVAIL:
1170 stat = HD_NOT_THERE;
1171 break_out = 1;
1172 break;
1173 case AP_RESPONSE_RESET_IN_PROGRESS:
1174 PDEBUG("device %d in reset\n",
1175 deviceNr);
1176 break;
1177 case AP_RESPONSE_DECONFIGURED:
1178 stat = HD_DECONFIGURED;
1179 break_out = 1;
1180 break;
1181 case AP_RESPONSE_CHECKSTOPPED:
1182 stat = HD_CHECKSTOPPED;
1183 break_out = 1;
1184 break;
1185 case AP_RESPONSE_BUSY:
1186 PDEBUG("device %d busy\n",
1187 deviceNr);
1188 break;
1189 default:
1190 break;
1191 }
1192 break;
1193 default:
1194 stat = HD_NOT_THERE;
1195 break_out = 1;
1196 break;
1197 }
1198 if (break_out)
1199 break;
1200
1201 udelay(5);
1202
1203 ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
1204 }
1205 return stat;
1206}
1207
1208enum devstat
1209reset_device(int deviceNr, int cdx, int resetNr)
1210{
1211 int q_nr, ccode = 0, dummy_qdepth, dummy_devType, i;
1212 struct ap_status_word stat_word;
1213 enum devstat stat;
1214 int break_out;
1215
1216 q_nr = (deviceNr << SKIP_BITL) + cdx;
1217 stat = DEV_GONE;
1218 ccode = resetq(q_nr, &stat_word);
1219 if (ccode > 3)
1220 return DEV_RSQ_EXCEPTION;
1221
1222 break_out = 0;
1223 for (i = 0; i < resetNr; i++) {
1224 switch (ccode) {
1225 case 0:
1226 stat = DEV_ONLINE;
1227 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1228 break_out = 1;
1229 break;
1230 case 3:
1231 switch (stat_word.response_code) {
1232 case AP_RESPONSE_NORMAL:
1233 stat = DEV_ONLINE;
1234 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1235 break_out = 1;
1236 break;
1237 case AP_RESPONSE_Q_NOT_AVAIL:
1238 case AP_RESPONSE_DECONFIGURED:
1239 case AP_RESPONSE_CHECKSTOPPED:
1240 stat = DEV_GONE;
1241 break_out = 1;
1242 break;
1243 case AP_RESPONSE_RESET_IN_PROGRESS:
1244 case AP_RESPONSE_BUSY:
1245 default:
1246 break;
1247 }
1248 break;
1249 default:
1250 stat = DEV_GONE;
1251 break_out = 1;
1252 break;
1253 }
1254 if (break_out == 1)
1255 break;
1256 udelay(5);
1257
1258 ccode = testq(q_nr, &dummy_qdepth, &dummy_devType, &stat_word);
1259 if (ccode > 3) {
1260 stat = DEV_TSQ_EXCEPTION;
1261 break;
1262 }
1263 }
1264 PDEBUG("Number of testq's needed for reset: %d\n", i);
1265
1266 if (i >= resetNr) {
1267 stat = DEV_GONE;
1268 }
1269
1270 return stat;
1271}
1272
1273#ifdef DEBUG_HYDRA_MSGS
1274static inline void
1275print_buffer(unsigned char *buffer, int bufflen)
1276{
1277 int i;
1278 for (i = 0; i < bufflen; i += 16) {
1279 PRINTK("%04X: %02X%02X%02X%02X %02X%02X%02X%02X "
1280 "%02X%02X%02X%02X %02X%02X%02X%02X\n", i,
1281 buffer[i+0], buffer[i+1], buffer[i+2], buffer[i+3],
1282 buffer[i+4], buffer[i+5], buffer[i+6], buffer[i+7],
1283 buffer[i+8], buffer[i+9], buffer[i+10], buffer[i+11],
1284 buffer[i+12], buffer[i+13], buffer[i+14], buffer[i+15]);
1285 }
1286}
1287#endif
1288
1289enum devstat
1290send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext)
1291{
1292 struct ap_status_word stat_word;
1293 enum devstat stat;
1294 int ccode;
1295 u32 *q_nr_p = (u32 *)msg_ext;
1296
1297 *q_nr_p = (dev_nr << SKIP_BITL) + cdx;
1298 PDEBUG("msg_len passed to sen: %d\n", msg_len);
1299 PDEBUG("q number passed to sen: %02x%02x%02x%02x\n",
1300 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]);
1301 stat = DEV_GONE;
1302
1303#ifdef DEBUG_HYDRA_MSGS
1304 PRINTK("Request header: %02X%02X%02X%02X %02X%02X%02X%02X "
1305 "%02X%02X%02X%02X\n",
1306 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3],
1307 msg_ext[4], msg_ext[5], msg_ext[6], msg_ext[7],
1308 msg_ext[8], msg_ext[9], msg_ext[10], msg_ext[11]);
1309 print_buffer(msg_ext+CALLER_HEADER, msg_len);
1310#endif
1311
1312 ccode = sen(msg_len, msg_ext, &stat_word);
1313 if (ccode > 3)
1314 return DEV_SEN_EXCEPTION;
1315
1316 PDEBUG("nq cc: %u, st: %02x%02x%02x%02x\n",
1317 ccode, stat_word.q_stat_flags, stat_word.response_code,
1318 stat_word.reserved[0], stat_word.reserved[1]);
1319 switch (ccode) {
1320 case 0:
1321 stat = DEV_ONLINE;
1322 break;
1323 case 1:
1324 stat = DEV_GONE;
1325 break;
1326 case 3:
1327 switch (stat_word.response_code) {
1328 case AP_RESPONSE_NORMAL:
1329 stat = DEV_ONLINE;
1330 break;
1331 case AP_RESPONSE_Q_FULL:
1332 stat = DEV_QUEUE_FULL;
1333 break;
1334 default:
1335 stat = DEV_GONE;
1336 break;
1337 }
1338 break;
1339 default:
1340 stat = DEV_GONE;
1341 break;
1342 }
1343
1344 return stat;
1345}
1346
1347enum devstat
1348receive_from_AP(int dev_nr, int cdx, int resplen, unsigned char *resp,
1349 unsigned char *psmid)
1350{
1351 int ccode;
1352 struct ap_status_word stat_word;
1353 enum devstat stat;
1354
1355 memset(resp, 0x00, 8);
1356
1357 ccode = rec((dev_nr << SKIP_BITL) + cdx, resplen, resp, psmid,
1358 &stat_word);
1359 if (ccode > 3)
1360 return DEV_REC_EXCEPTION;
1361
1362 PDEBUG("dq cc: %u, st: %02x%02x%02x%02x\n",
1363 ccode, stat_word.q_stat_flags, stat_word.response_code,
1364 stat_word.reserved[0], stat_word.reserved[1]);
1365
1366 stat = DEV_GONE;
1367 switch (ccode) {
1368 case 0:
1369 stat = DEV_ONLINE;
1370#ifdef DEBUG_HYDRA_MSGS
1371 print_buffer(resp, resplen);
1372#endif
1373 break;
1374 case 3:
1375 switch (stat_word.response_code) {
1376 case AP_RESPONSE_NORMAL:
1377 stat = DEV_ONLINE;
1378 break;
1379 case AP_RESPONSE_NO_PENDING_REPLY:
1380 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1381 stat = DEV_EMPTY;
1382 else
1383 stat = DEV_NO_WORK;
1384 break;
1385 case AP_RESPONSE_INDEX_TOO_BIG:
1386 case AP_RESPONSE_NO_FIRST_PART:
1387 case AP_RESPONSE_MESSAGE_TOO_BIG:
1388 stat = DEV_BAD_MESSAGE;
1389 break;
1390 default:
1391 break;
1392 }
1393 break;
1394 default:
1395 break;
1396 }
1397
1398 return stat;
1399}
1400
1401static inline int
1402pad_msg(unsigned char *buffer, int totalLength, int msgLength)
1403{
1404 int pad_len;
1405
1406 for (pad_len = 0; pad_len < (totalLength - msgLength); pad_len++)
1407 if (buffer[pad_len] != 0x00)
1408 break;
1409 pad_len -= 3;
1410 if (pad_len < 8)
1411 return SEN_PAD_ERROR;
1412
1413 buffer[0] = 0x00;
1414 buffer[1] = 0x02;
1415
1416 memcpy(buffer+2, static_pad, pad_len);
1417
1418 buffer[pad_len + 2] = 0x00;
1419
1420 return 0;
1421}
1422
1423static inline int
1424is_common_public_key(unsigned char *key, int len)
1425{
1426 int i;
1427
1428 for (i = 0; i < len; i++)
1429 if (key[i])
1430 break;
1431 key += i;
1432 len -= i;
1433 if (((len == 1) && (key[0] == 3)) ||
1434 ((len == 3) && (key[0] == 1) && (key[1] == 0) && (key[2] == 1)))
1435 return 1;
1436
1437 return 0;
1438}
1439
1440static int
1441ICAMEX_msg_to_type4MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
1442 union type4_msg *z90cMsg_p)
1443{
1444 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
1445 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
1446 union type4_msg *tmp_type4_msg;
1447
1448 mod_len = icaMex_p->inputdatalength;
1449
1450 msg_size = ((mod_len <= 128) ? TYPE4_SME_LEN : TYPE4_LME_LEN) +
1451 CALLER_HEADER;
1452
1453 memset(z90cMsg_p, 0, msg_size);
1454
1455 tmp_type4_msg = (union type4_msg *)
1456 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
1457
1458 tmp_type4_msg->sme.header.msg_type_code = TYPE4_TYPE_CODE;
1459 tmp_type4_msg->sme.header.request_code = TYPE4_REQU_CODE;
1460
1461 if (mod_len <= 128) {
1462 tmp_type4_msg->sme.header.msg_fmt = TYPE4_SME_FMT;
1463 tmp_type4_msg->sme.header.msg_len = TYPE4_SME_LEN;
1464 mod_tgt = tmp_type4_msg->sme.modulus;
1465 mod_tgt_len = sizeof(tmp_type4_msg->sme.modulus);
1466 exp_tgt = tmp_type4_msg->sme.exponent;
1467 exp_tgt_len = sizeof(tmp_type4_msg->sme.exponent);
1468 inp_tgt = tmp_type4_msg->sme.message;
1469 inp_tgt_len = sizeof(tmp_type4_msg->sme.message);
1470 } else {
1471 tmp_type4_msg->lme.header.msg_fmt = TYPE4_LME_FMT;
1472 tmp_type4_msg->lme.header.msg_len = TYPE4_LME_LEN;
1473 mod_tgt = tmp_type4_msg->lme.modulus;
1474 mod_tgt_len = sizeof(tmp_type4_msg->lme.modulus);
1475 exp_tgt = tmp_type4_msg->lme.exponent;
1476 exp_tgt_len = sizeof(tmp_type4_msg->lme.exponent);
1477 inp_tgt = tmp_type4_msg->lme.message;
1478 inp_tgt_len = sizeof(tmp_type4_msg->lme.message);
1479 }
1480
1481 mod_tgt += (mod_tgt_len - mod_len);
1482 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
1483 return SEN_RELEASED;
1484 if (is_empty(mod_tgt, mod_len))
1485 return SEN_USER_ERROR;
1486 exp_tgt += (exp_tgt_len - mod_len);
1487 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
1488 return SEN_RELEASED;
1489 if (is_empty(exp_tgt, mod_len))
1490 return SEN_USER_ERROR;
1491 inp_tgt += (inp_tgt_len - mod_len);
1492 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
1493 return SEN_RELEASED;
1494 if (is_empty(inp_tgt, mod_len))
1495 return SEN_USER_ERROR;
1496
1497 *z90cMsg_l_p = msg_size - CALLER_HEADER;
1498
1499 return 0;
1500}
1501
1502static int
1503ICACRT_msg_to_type4CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
1504 int *z90cMsg_l_p, union type4_msg *z90cMsg_p)
1505{
1506 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
1507 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len;
1508 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt;
1509 union type4_msg *tmp_type4_msg;
1510
1511 mod_len = icaMsg_p->inputdatalength;
1512 short_len = mod_len / 2;
1513 long_len = mod_len / 2 + 8;
1514
1515 tmp_size = ((mod_len <= 128) ? TYPE4_SCR_LEN : TYPE4_LCR_LEN) +
1516 CALLER_HEADER;
1517
1518 memset(z90cMsg_p, 0, tmp_size);
1519
1520 tmp_type4_msg = (union type4_msg *)
1521 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
1522
1523 tmp_type4_msg->scr.header.msg_type_code = TYPE4_TYPE_CODE;
1524 tmp_type4_msg->scr.header.request_code = TYPE4_REQU_CODE;
1525 if (mod_len <= 128) {
1526 tmp_type4_msg->scr.header.msg_fmt = TYPE4_SCR_FMT;
1527 tmp_type4_msg->scr.header.msg_len = TYPE4_SCR_LEN;
1528 p_tgt = tmp_type4_msg->scr.p;
1529 p_tgt_len = sizeof(tmp_type4_msg->scr.p);
1530 q_tgt = tmp_type4_msg->scr.q;
1531 q_tgt_len = sizeof(tmp_type4_msg->scr.q);
1532 dp_tgt = tmp_type4_msg->scr.dp;
1533 dp_tgt_len = sizeof(tmp_type4_msg->scr.dp);
1534 dq_tgt = tmp_type4_msg->scr.dq;
1535 dq_tgt_len = sizeof(tmp_type4_msg->scr.dq);
1536 u_tgt = tmp_type4_msg->scr.u;
1537 u_tgt_len = sizeof(tmp_type4_msg->scr.u);
1538 inp_tgt = tmp_type4_msg->scr.message;
1539 inp_tgt_len = sizeof(tmp_type4_msg->scr.message);
1540 } else {
1541 tmp_type4_msg->lcr.header.msg_fmt = TYPE4_LCR_FMT;
1542 tmp_type4_msg->lcr.header.msg_len = TYPE4_LCR_LEN;
1543 p_tgt = tmp_type4_msg->lcr.p;
1544 p_tgt_len = sizeof(tmp_type4_msg->lcr.p);
1545 q_tgt = tmp_type4_msg->lcr.q;
1546 q_tgt_len = sizeof(tmp_type4_msg->lcr.q);
1547 dp_tgt = tmp_type4_msg->lcr.dp;
1548 dp_tgt_len = sizeof(tmp_type4_msg->lcr.dp);
1549 dq_tgt = tmp_type4_msg->lcr.dq;
1550 dq_tgt_len = sizeof(tmp_type4_msg->lcr.dq);
1551 u_tgt = tmp_type4_msg->lcr.u;
1552 u_tgt_len = sizeof(tmp_type4_msg->lcr.u);
1553 inp_tgt = tmp_type4_msg->lcr.message;
1554 inp_tgt_len = sizeof(tmp_type4_msg->lcr.message);
1555 }
1556
1557 p_tgt += (p_tgt_len - long_len);
1558 if (copy_from_user(p_tgt, icaMsg_p->np_prime, long_len))
1559 return SEN_RELEASED;
1560 if (is_empty(p_tgt, long_len))
1561 return SEN_USER_ERROR;
1562 q_tgt += (q_tgt_len - short_len);
1563 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
1564 return SEN_RELEASED;
1565 if (is_empty(q_tgt, short_len))
1566 return SEN_USER_ERROR;
1567 dp_tgt += (dp_tgt_len - long_len);
1568 if (copy_from_user(dp_tgt, icaMsg_p->bp_key, long_len))
1569 return SEN_RELEASED;
1570 if (is_empty(dp_tgt, long_len))
1571 return SEN_USER_ERROR;
1572 dq_tgt += (dq_tgt_len - short_len);
1573 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
1574 return SEN_RELEASED;
1575 if (is_empty(dq_tgt, short_len))
1576 return SEN_USER_ERROR;
1577 u_tgt += (u_tgt_len - long_len);
1578 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv, long_len))
1579 return SEN_RELEASED;
1580 if (is_empty(u_tgt, long_len))
1581 return SEN_USER_ERROR;
1582 inp_tgt += (inp_tgt_len - mod_len);
1583 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
1584 return SEN_RELEASED;
1585 if (is_empty(inp_tgt, mod_len))
1586 return SEN_USER_ERROR;
1587
1588 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1589
1590 return 0;
1591}
1592
1593static int
1594ICAMEX_msg_to_type6MEX_de_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1595 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1596{
1597 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
1598 unsigned char *temp;
1599 struct type6_hdr *tp6Hdr_p;
1600 struct CPRB *cprb_p;
1601 struct cca_private_ext_ME *key_p;
1602 static int deprecated_msg_count = 0;
1603
1604 mod_len = icaMsg_p->inputdatalength;
1605 tmp_size = FIXED_TYPE6_ME_LEN + mod_len;
1606 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1607 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1608 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1609
1610 memset(z90cMsg_p, 0, tmp_size);
1611
1612 temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1613 memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
1614 tp6Hdr_p = (struct type6_hdr *)temp;
1615 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1616 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1617
1618 temp += sizeof(struct type6_hdr);
1619 memcpy(temp, &static_cprb, sizeof(struct CPRB));
1620 cprb_p = (struct CPRB *) temp;
1621 cprb_p->usage_domain[0]= (unsigned char)cdx;
1622 itoLe2(&parmBlock_l, cprb_p->req_parml);
1623 itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
1624
1625 temp += sizeof(struct CPRB);
1626 memcpy(temp, &static_pkd_function_and_rules,
1627 sizeof(struct function_and_rules_block));
1628
1629 temp += sizeof(struct function_and_rules_block);
1630 vud_len = 2 + icaMsg_p->inputdatalength;
1631 itoLe2(&vud_len, temp);
1632
1633 temp += 2;
1634 if (copy_from_user(temp, icaMsg_p->inputdata, mod_len))
1635 return SEN_RELEASED;
1636 if (is_empty(temp, mod_len))
1637 return SEN_USER_ERROR;
1638
1639 temp += mod_len;
1640 memcpy(temp, &static_T6_keyBlock_hdr, sizeof(struct T6_keyBlock_hdr));
1641
1642 temp += sizeof(struct T6_keyBlock_hdr);
1643 memcpy(temp, &static_pvt_me_key, sizeof(struct cca_private_ext_ME));
1644 key_p = (struct cca_private_ext_ME *)temp;
1645 temp = key_p->pvtMESec.exponent + sizeof(key_p->pvtMESec.exponent)
1646 - mod_len;
1647 if (copy_from_user(temp, icaMsg_p->b_key, mod_len))
1648 return SEN_RELEASED;
1649 if (is_empty(temp, mod_len))
1650 return SEN_USER_ERROR;
1651
1652 if (is_common_public_key(temp, mod_len)) {
1653 if (deprecated_msg_count < 20) {
1654 PRINTK("Common public key used for modex decrypt\n");
1655 deprecated_msg_count++;
1656 if (deprecated_msg_count == 20)
1657 PRINTK("No longer issuing messages about common"
1658 " public key for modex decrypt.\n");
1659 }
1660 return SEN_NOT_AVAIL;
1661 }
1662
1663 temp = key_p->pvtMESec.modulus + sizeof(key_p->pvtMESec.modulus)
1664 - mod_len;
1665 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1666 return SEN_RELEASED;
1667 if (is_empty(temp, mod_len))
1668 return SEN_USER_ERROR;
1669
1670 key_p->pubMESec.modulus_bit_len = 8 * mod_len;
1671
1672 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1673
1674 return 0;
1675}
1676
1677static int
1678ICAMEX_msg_to_type6MEX_en_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1679 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1680{
1681 int mod_len, vud_len, exp_len, key_len;
1682 int pad_len, tmp_size, total_CPRB_len, parmBlock_l, i;
1683 unsigned char *temp_exp, *exp_p, *temp;
1684 struct type6_hdr *tp6Hdr_p;
1685 struct CPRB *cprb_p;
1686 struct cca_public_key *key_p;
1687 struct T6_keyBlock_hdr *keyb_p;
1688
1689 temp_exp = kmalloc(256, GFP_KERNEL);
1690 if (!temp_exp)
1691 return EGETBUFF;
1692 mod_len = icaMsg_p->inputdatalength;
1693 if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
1694 kfree(temp_exp);
1695 return SEN_RELEASED;
1696 }
1697 if (is_empty(temp_exp, mod_len)) {
1698 kfree(temp_exp);
1699 return SEN_USER_ERROR;
1700 }
1701
1702 exp_p = temp_exp;
1703 for (i = 0; i < mod_len; i++)
1704 if (exp_p[i])
1705 break;
1706 if (i >= mod_len) {
1707 kfree(temp_exp);
1708 return SEN_USER_ERROR;
1709 }
1710
1711 exp_len = mod_len - i;
1712 exp_p += i;
1713
1714 PDEBUG("exp_len after computation: %08x\n", exp_len);
1715 tmp_size = FIXED_TYPE6_ME_EN_LEN + 2 * mod_len + exp_len;
1716 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1717 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1718 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1719
1720 vud_len = 2 + mod_len;
1721 memset(z90cMsg_p, 0, tmp_size);
1722
1723 temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1724 memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
1725 tp6Hdr_p = (struct type6_hdr *)temp;
1726 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1727 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1728 memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
1729 sizeof(static_PKE_function_code));
1730 temp += sizeof(struct type6_hdr);
1731 memcpy(temp, &static_cprb, sizeof(struct CPRB));
1732 cprb_p = (struct CPRB *) temp;
1733 cprb_p->usage_domain[0]= (unsigned char)cdx;
1734 itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
1735 temp += sizeof(struct CPRB);
1736 memcpy(temp, &static_pke_function_and_rules,
1737 sizeof(struct function_and_rules_block));
1738 temp += sizeof(struct function_and_rules_block);
1739 temp += 2;
1740 if (copy_from_user(temp, icaMsg_p->inputdata, mod_len)) {
1741 kfree(temp_exp);
1742 return SEN_RELEASED;
1743 }
1744 if (is_empty(temp, mod_len)) {
1745 kfree(temp_exp);
1746 return SEN_USER_ERROR;
1747 }
1748 if ((temp[0] != 0x00) || (temp[1] != 0x02)) {
1749 kfree(temp_exp);
1750 return SEN_NOT_AVAIL;
1751 }
1752 for (i = 2; i < mod_len; i++)
1753 if (temp[i] == 0x00)
1754 break;
1755 if ((i < 9) || (i > (mod_len - 2))) {
1756 kfree(temp_exp);
1757 return SEN_NOT_AVAIL;
1758 }
1759 pad_len = i + 1;
1760 vud_len = mod_len - pad_len;
1761 memmove(temp, temp+pad_len, vud_len);
1762 temp -= 2;
1763 vud_len += 2;
1764 itoLe2(&vud_len, temp);
1765 temp += (vud_len);
1766 keyb_p = (struct T6_keyBlock_hdr *)temp;
1767 temp += sizeof(struct T6_keyBlock_hdr);
1768 memcpy(temp, &static_public_key, sizeof(static_public_key));
1769 key_p = (struct cca_public_key *)temp;
1770 temp = key_p->pubSec.exponent;
1771 memcpy(temp, exp_p, exp_len);
1772 kfree(temp_exp);
1773 temp += exp_len;
1774 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1775 return SEN_RELEASED;
1776 if (is_empty(temp, mod_len))
1777 return SEN_USER_ERROR;
1778 key_p->pubSec.modulus_bit_len = 8 * mod_len;
1779 key_p->pubSec.modulus_byte_len = mod_len;
1780 key_p->pubSec.exponent_len = exp_len;
1781 key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
1782 key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
1783 key_p->pubHdr.token_length = key_len;
1784 key_len += 4;
1785 itoLe2(&key_len, keyb_p->ulen);
1786 key_len += 2;
1787 itoLe2(&key_len, keyb_p->blen);
1788 parmBlock_l -= pad_len;
1789 itoLe2(&parmBlock_l, cprb_p->req_parml);
1790 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1791
1792 return 0;
1793}
1794
1795static int
1796ICACRT_msg_to_type6CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
1797 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1798{
1799 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
1800 int long_len, pad_len, keyPartsLen, tmp_l;
1801 unsigned char *tgt_p, *temp;
1802 struct type6_hdr *tp6Hdr_p;
1803 struct CPRB *cprb_p;
1804 struct cca_token_hdr *keyHdr_p;
1805 struct cca_pvt_ext_CRT_sec *pvtSec_p;
1806 struct cca_public_sec *pubSec_p;
1807
1808 mod_len = icaMsg_p->inputdatalength;
1809 short_len = mod_len / 2;
1810 long_len = 8 + short_len;
1811 keyPartsLen = 3 * long_len + 2 * short_len;
1812 pad_len = (8 - (keyPartsLen % 8)) % 8;
1813 keyPartsLen += pad_len + mod_len;
1814 tmp_size = FIXED_TYPE6_CR_LEN + keyPartsLen + mod_len;
1815 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1816 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1817 vud_len = 2 + mod_len;
1818 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1819
1820 memset(z90cMsg_p, 0, tmp_size);
1821 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1822 memcpy(tgt_p, &static_type6_hdr, sizeof(struct type6_hdr));
1823 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1824 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1825 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1826 tgt_p += sizeof(struct type6_hdr);
1827 cprb_p = (struct CPRB *) tgt_p;
1828 memcpy(tgt_p, &static_cprb, sizeof(struct CPRB));
1829 cprb_p->usage_domain[0]= *((unsigned char *)(&(cdx))+3);
1830 itoLe2(&parmBlock_l, cprb_p->req_parml);
1831 memcpy(cprb_p->rpl_parml, cprb_p->req_parml,
1832 sizeof(cprb_p->req_parml));
1833 tgt_p += sizeof(struct CPRB);
1834 memcpy(tgt_p, &static_pkd_function_and_rules,
1835 sizeof(struct function_and_rules_block));
1836 tgt_p += sizeof(struct function_and_rules_block);
1837 itoLe2(&vud_len, tgt_p);
1838 tgt_p += 2;
1839 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
1840 return SEN_RELEASED;
1841 if (is_empty(tgt_p, mod_len))
1842 return SEN_USER_ERROR;
1843 tgt_p += mod_len;
1844 tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
1845 sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
1846 itoLe2(&tmp_l, tgt_p);
1847 temp = tgt_p + 2;
1848 tmp_l -= 2;
1849 itoLe2(&tmp_l, temp);
1850 tgt_p += sizeof(struct T6_keyBlock_hdr);
1851 keyHdr_p = (struct cca_token_hdr *)tgt_p;
1852 keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
1853 tmp_l -= 4;
1854 keyHdr_p->token_length = tmp_l;
1855 tgt_p += sizeof(struct cca_token_hdr);
1856 pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
1857 pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
1858 pvtSec_p->section_length =
1859 sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
1860 pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
1861 pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
1862 pvtSec_p->p_len = long_len;
1863 pvtSec_p->q_len = short_len;
1864 pvtSec_p->dp_len = long_len;
1865 pvtSec_p->dq_len = short_len;
1866 pvtSec_p->u_len = long_len;
1867 pvtSec_p->mod_len = mod_len;
1868 pvtSec_p->pad_len = pad_len;
1869 tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
1870 if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
1871 return SEN_RELEASED;
1872 if (is_empty(tgt_p, long_len))
1873 return SEN_USER_ERROR;
1874 tgt_p += long_len;
1875 if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
1876 return SEN_RELEASED;
1877 if (is_empty(tgt_p, short_len))
1878 return SEN_USER_ERROR;
1879 tgt_p += short_len;
1880 if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
1881 return SEN_RELEASED;
1882 if (is_empty(tgt_p, long_len))
1883 return SEN_USER_ERROR;
1884 tgt_p += long_len;
1885 if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
1886 return SEN_RELEASED;
1887 if (is_empty(tgt_p, short_len))
1888 return SEN_USER_ERROR;
1889 tgt_p += short_len;
1890 if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
1891 return SEN_RELEASED;
1892 if (is_empty(tgt_p, long_len))
1893 return SEN_USER_ERROR;
1894 tgt_p += long_len;
1895 tgt_p += pad_len;
1896 memset(tgt_p, 0xFF, mod_len);
1897 tgt_p += mod_len;
1898 memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
1899 pubSec_p = (struct cca_public_sec *) tgt_p;
1900 pubSec_p->modulus_bit_len = 8 * mod_len;
1901 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1902
1903 return 0;
1904}
1905
1906static int
1907ICAMEX_msg_to_type6MEX_msgX(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1908 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
1909 int dev_type)
1910{
1911 int mod_len, exp_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
1912 int key_len, i;
1913 unsigned char *temp_exp, *tgt_p, *temp, *exp_p;
1914 struct type6_hdr *tp6Hdr_p;
1915 struct CPRBX *cprbx_p;
1916 struct cca_public_key *key_p;
1917 struct T6_keyBlock_hdrX *keyb_p;
1918
1919 temp_exp = kmalloc(256, GFP_KERNEL);
1920 if (!temp_exp)
1921 return EGETBUFF;
1922 mod_len = icaMsg_p->inputdatalength;
1923 if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
1924 kfree(temp_exp);
1925 return SEN_RELEASED;
1926 }
1927 if (is_empty(temp_exp, mod_len)) {
1928 kfree(temp_exp);
1929 return SEN_USER_ERROR;
1930 }
1931 exp_p = temp_exp;
1932 for (i = 0; i < mod_len; i++)
1933 if (exp_p[i])
1934 break;
1935 if (i >= mod_len) {
1936 kfree(temp_exp);
1937 return SEN_USER_ERROR;
1938 }
1939 exp_len = mod_len - i;
1940 exp_p += i;
1941 PDEBUG("exp_len after computation: %08x\n", exp_len);
1942 tmp_size = FIXED_TYPE6_ME_EN_LENX + 2 * mod_len + exp_len;
1943 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1944 parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
1945 tmp_size = tmp_size + CALLER_HEADER;
1946 vud_len = 2 + mod_len;
1947 memset(z90cMsg_p, 0, tmp_size);
1948 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1949 memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
1950 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1951 tp6Hdr_p->ToCardLen1 = total_CPRB_len;
1952 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
1953 memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
1954 sizeof(static_PKE_function_code));
1955 tgt_p += sizeof(struct type6_hdr);
1956 memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
1957 cprbx_p = (struct CPRBX *) tgt_p;
1958 cprbx_p->domain = (unsigned short)cdx;
1959 cprbx_p->rpl_msgbl = RESPONSE_CPRBX_SIZE;
1960 tgt_p += sizeof(struct CPRBX);
1961 if (dev_type == PCIXCC_MCL2)
1962 memcpy(tgt_p, &static_pke_function_and_rulesX_MCL2,
1963 sizeof(struct function_and_rules_block));
1964 else
1965 memcpy(tgt_p, &static_pke_function_and_rulesX,
1966 sizeof(struct function_and_rules_block));
1967 tgt_p += sizeof(struct function_and_rules_block);
1968
1969 tgt_p += 2;
1970 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) {
1971 kfree(temp_exp);
1972 return SEN_RELEASED;
1973 }
1974 if (is_empty(tgt_p, mod_len)) {
1975 kfree(temp_exp);
1976 return SEN_USER_ERROR;
1977 }
1978 tgt_p -= 2;
1979 *((short *)tgt_p) = (short) vud_len;
1980 tgt_p += vud_len;
1981 keyb_p = (struct T6_keyBlock_hdrX *)tgt_p;
1982 tgt_p += sizeof(struct T6_keyBlock_hdrX);
1983 memcpy(tgt_p, &static_public_key, sizeof(static_public_key));
1984 key_p = (struct cca_public_key *)tgt_p;
1985 temp = key_p->pubSec.exponent;
1986 memcpy(temp, exp_p, exp_len);
1987 kfree(temp_exp);
1988 temp += exp_len;
1989 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1990 return SEN_RELEASED;
1991 if (is_empty(temp, mod_len))
1992 return SEN_USER_ERROR;
1993 key_p->pubSec.modulus_bit_len = 8 * mod_len;
1994 key_p->pubSec.modulus_byte_len = mod_len;
1995 key_p->pubSec.exponent_len = exp_len;
1996 key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
1997 key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
1998 key_p->pubHdr.token_length = key_len;
1999 key_len += 4;
2000 keyb_p->ulen = (unsigned short)key_len;
2001 key_len += 2;
2002 keyb_p->blen = (unsigned short)key_len;
2003 cprbx_p->req_parml = parmBlock_l;
2004 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2005
2006 return 0;
2007}
2008
2009static int
2010ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
2011 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
2012 int dev_type)
2013{
2014 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
2015 int long_len, pad_len, keyPartsLen, tmp_l;
2016 unsigned char *tgt_p, *temp;
2017 struct type6_hdr *tp6Hdr_p;
2018 struct CPRBX *cprbx_p;
2019 struct cca_token_hdr *keyHdr_p;
2020 struct cca_pvt_ext_CRT_sec *pvtSec_p;
2021 struct cca_public_sec *pubSec_p;
2022
2023 mod_len = icaMsg_p->inputdatalength;
2024 short_len = mod_len / 2;
2025 long_len = 8 + short_len;
2026 keyPartsLen = 3 * long_len + 2 * short_len;
2027 pad_len = (8 - (keyPartsLen % 8)) % 8;
2028 keyPartsLen += pad_len + mod_len;
2029 tmp_size = FIXED_TYPE6_CR_LENX + keyPartsLen + mod_len;
2030 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
2031 parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
2032 vud_len = 2 + mod_len;
2033 tmp_size = tmp_size + CALLER_HEADER;
2034 memset(z90cMsg_p, 0, tmp_size);
2035 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
2036 memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
2037 tp6Hdr_p = (struct type6_hdr *)tgt_p;
2038 tp6Hdr_p->ToCardLen1 = total_CPRB_len;
2039 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
2040 tgt_p += sizeof(struct type6_hdr);
2041 cprbx_p = (struct CPRBX *) tgt_p;
2042 memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
2043 cprbx_p->domain = (unsigned short)cdx;
2044 cprbx_p->req_parml = parmBlock_l;
2045 cprbx_p->rpl_msgbl = parmBlock_l;
2046 tgt_p += sizeof(struct CPRBX);
2047 if (dev_type == PCIXCC_MCL2)
2048 memcpy(tgt_p, &static_pkd_function_and_rulesX_MCL2,
2049 sizeof(struct function_and_rules_block));
2050 else
2051 memcpy(tgt_p, &static_pkd_function_and_rulesX,
2052 sizeof(struct function_and_rules_block));
2053 tgt_p += sizeof(struct function_and_rules_block);
2054 *((short *)tgt_p) = (short) vud_len;
2055 tgt_p += 2;
2056 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
2057 return SEN_RELEASED;
2058 if (is_empty(tgt_p, mod_len))
2059 return SEN_USER_ERROR;
2060 tgt_p += mod_len;
2061 tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
2062 sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
2063 *((short *)tgt_p) = (short) tmp_l;
2064 temp = tgt_p + 2;
2065 tmp_l -= 2;
2066 *((short *)temp) = (short) tmp_l;
2067 tgt_p += sizeof(struct T6_keyBlock_hdr);
2068 keyHdr_p = (struct cca_token_hdr *)tgt_p;
2069 keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
2070 tmp_l -= 4;
2071 keyHdr_p->token_length = tmp_l;
2072 tgt_p += sizeof(struct cca_token_hdr);
2073 pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
2074 pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
2075 pvtSec_p->section_length =
2076 sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
2077 pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
2078 pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
2079 pvtSec_p->p_len = long_len;
2080 pvtSec_p->q_len = short_len;
2081 pvtSec_p->dp_len = long_len;
2082 pvtSec_p->dq_len = short_len;
2083 pvtSec_p->u_len = long_len;
2084 pvtSec_p->mod_len = mod_len;
2085 pvtSec_p->pad_len = pad_len;
2086 tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
2087 if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
2088 return SEN_RELEASED;
2089 if (is_empty(tgt_p, long_len))
2090 return SEN_USER_ERROR;
2091 tgt_p += long_len;
2092 if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
2093 return SEN_RELEASED;
2094 if (is_empty(tgt_p, short_len))
2095 return SEN_USER_ERROR;
2096 tgt_p += short_len;
2097 if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
2098 return SEN_RELEASED;
2099 if (is_empty(tgt_p, long_len))
2100 return SEN_USER_ERROR;
2101 tgt_p += long_len;
2102 if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
2103 return SEN_RELEASED;
2104 if (is_empty(tgt_p, short_len))
2105 return SEN_USER_ERROR;
2106 tgt_p += short_len;
2107 if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
2108 return SEN_RELEASED;
2109 if (is_empty(tgt_p, long_len))
2110 return SEN_USER_ERROR;
2111 tgt_p += long_len;
2112 tgt_p += pad_len;
2113 memset(tgt_p, 0xFF, mod_len);
2114 tgt_p += mod_len;
2115 memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
2116 pubSec_p = (struct cca_public_sec *) tgt_p;
2117 pubSec_p->modulus_bit_len = 8 * mod_len;
2118 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2119
2120 return 0;
2121}
2122
2123static int
2124ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
2125 union type50_msg *z90cMsg_p)
2126{
2127 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
2128 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
2129 union type50_msg *tmp_type50_msg;
2130
2131 mod_len = icaMex_p->inputdatalength;
2132
2133 msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) +
2134 CALLER_HEADER;
2135
2136 memset(z90cMsg_p, 0, msg_size);
2137
2138 tmp_type50_msg = (union type50_msg *)
2139 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2140
2141 tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE;
2142
2143 if (mod_len <= 128) {
2144 tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN;
2145 tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT;
2146 mod_tgt = tmp_type50_msg->meb1.modulus;
2147 mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus);
2148 exp_tgt = tmp_type50_msg->meb1.exponent;
2149 exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent);
2150 inp_tgt = tmp_type50_msg->meb1.message;
2151 inp_tgt_len = sizeof(tmp_type50_msg->meb1.message);
2152 } else {
2153 tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN;
2154 tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT;
2155 mod_tgt = tmp_type50_msg->meb2.modulus;
2156 mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus);
2157 exp_tgt = tmp_type50_msg->meb2.exponent;
2158 exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent);
2159 inp_tgt = tmp_type50_msg->meb2.message;
2160 inp_tgt_len = sizeof(tmp_type50_msg->meb2.message);
2161 }
2162
2163 mod_tgt += (mod_tgt_len - mod_len);
2164 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
2165 return SEN_RELEASED;
2166 if (is_empty(mod_tgt, mod_len))
2167 return SEN_USER_ERROR;
2168 exp_tgt += (exp_tgt_len - mod_len);
2169 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
2170 return SEN_RELEASED;
2171 if (is_empty(exp_tgt, mod_len))
2172 return SEN_USER_ERROR;
2173 inp_tgt += (inp_tgt_len - mod_len);
2174 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
2175 return SEN_RELEASED;
2176 if (is_empty(inp_tgt, mod_len))
2177 return SEN_USER_ERROR;
2178
2179 *z90cMsg_l_p = msg_size - CALLER_HEADER;
2180
2181 return 0;
2182}
2183
2184static int
2185ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
2186 int *z90cMsg_l_p, union type50_msg *z90cMsg_p)
2187{
2188 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
2189 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset;
2190 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt,
2191 temp[8];
2192 union type50_msg *tmp_type50_msg;
2193
2194 mod_len = icaMsg_p->inputdatalength;
2195 short_len = mod_len / 2;
2196 long_len = mod_len / 2 + 8;
2197 long_offset = 0;
2198
2199 if (long_len > 128) {
2200 memset(temp, 0x00, sizeof(temp));
2201 if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128))
2202 return SEN_RELEASED;
2203 if (!is_empty(temp, 8))
2204 return SEN_NOT_AVAIL;
2205 if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128))
2206 return SEN_RELEASED;
2207 if (!is_empty(temp, 8))
2208 return SEN_NOT_AVAIL;
2209 if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128))
2210 return SEN_RELEASED;
2211 if (!is_empty(temp, 8))
2212 return SEN_NOT_AVAIL;
2213 long_offset = long_len - 128;
2214 long_len = 128;
2215 }
2216
2217 tmp_size = ((long_len <= 64) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) +
2218 CALLER_HEADER;
2219
2220 memset(z90cMsg_p, 0, tmp_size);
2221
2222 tmp_type50_msg = (union type50_msg *)
2223 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2224
2225 tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE;
2226 if (long_len <= 64) {
2227 tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN;
2228 tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT;
2229 p_tgt = tmp_type50_msg->crb1.p;
2230 p_tgt_len = sizeof(tmp_type50_msg->crb1.p);
2231 q_tgt = tmp_type50_msg->crb1.q;
2232 q_tgt_len = sizeof(tmp_type50_msg->crb1.q);
2233 dp_tgt = tmp_type50_msg->crb1.dp;
2234 dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp);
2235 dq_tgt = tmp_type50_msg->crb1.dq;
2236 dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq);
2237 u_tgt = tmp_type50_msg->crb1.u;
2238 u_tgt_len = sizeof(tmp_type50_msg->crb1.u);
2239 inp_tgt = tmp_type50_msg->crb1.message;
2240 inp_tgt_len = sizeof(tmp_type50_msg->crb1.message);
2241 } else {
2242 tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN;
2243 tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT;
2244 p_tgt = tmp_type50_msg->crb2.p;
2245 p_tgt_len = sizeof(tmp_type50_msg->crb2.p);
2246 q_tgt = tmp_type50_msg->crb2.q;
2247 q_tgt_len = sizeof(tmp_type50_msg->crb2.q);
2248 dp_tgt = tmp_type50_msg->crb2.dp;
2249 dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp);
2250 dq_tgt = tmp_type50_msg->crb2.dq;
2251 dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq);
2252 u_tgt = tmp_type50_msg->crb2.u;
2253 u_tgt_len = sizeof(tmp_type50_msg->crb2.u);
2254 inp_tgt = tmp_type50_msg->crb2.message;
2255 inp_tgt_len = sizeof(tmp_type50_msg->crb2.message);
2256 }
2257
2258 p_tgt += (p_tgt_len - long_len);
2259 if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len))
2260 return SEN_RELEASED;
2261 if (is_empty(p_tgt, long_len))
2262 return SEN_USER_ERROR;
2263 q_tgt += (q_tgt_len - short_len);
2264 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
2265 return SEN_RELEASED;
2266 if (is_empty(q_tgt, short_len))
2267 return SEN_USER_ERROR;
2268 dp_tgt += (dp_tgt_len - long_len);
2269 if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len))
2270 return SEN_RELEASED;
2271 if (is_empty(dp_tgt, long_len))
2272 return SEN_USER_ERROR;
2273 dq_tgt += (dq_tgt_len - short_len);
2274 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
2275 return SEN_RELEASED;
2276 if (is_empty(dq_tgt, short_len))
2277 return SEN_USER_ERROR;
2278 u_tgt += (u_tgt_len - long_len);
2279 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len))
2280 return SEN_RELEASED;
2281 if (is_empty(u_tgt, long_len))
2282 return SEN_USER_ERROR;
2283 inp_tgt += (inp_tgt_len - mod_len);
2284 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
2285 return SEN_RELEASED;
2286 if (is_empty(inp_tgt, mod_len))
2287 return SEN_USER_ERROR;
2288
2289 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2290
2291 return 0;
2292}
2293
2294int
2295convert_request(unsigned char *buffer, int func, unsigned short function,
2296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
2297{
2298 if (dev_type == PCICA) {
2299 if (func == ICARSACRT)
2300 return ICACRT_msg_to_type4CRT_msg(
2301 (struct ica_rsa_modexpo_crt *) buffer,
2302 msg_l_p, (union type4_msg *) msg_p);
2303 else
2304 return ICAMEX_msg_to_type4MEX_msg(
2305 (struct ica_rsa_modexpo *) buffer,
2306 msg_l_p, (union type4_msg *) msg_p);
2307 }
2308 if (dev_type == PCICC) {
2309 if (func == ICARSACRT)
2310 return ICACRT_msg_to_type6CRT_msg(
2311 (struct ica_rsa_modexpo_crt *) buffer,
2312 cdx, msg_l_p, (struct type6_msg *)msg_p);
2313 if (function == PCI_FUNC_KEY_ENCRYPT)
2314 return ICAMEX_msg_to_type6MEX_en_msg(
2315 (struct ica_rsa_modexpo *) buffer,
2316 cdx, msg_l_p, (struct type6_msg *) msg_p);
2317 else
2318 return ICAMEX_msg_to_type6MEX_de_msg(
2319 (struct ica_rsa_modexpo *) buffer,
2320 cdx, msg_l_p, (struct type6_msg *) msg_p);
2321 }
2322 if ((dev_type == PCIXCC_MCL2) ||
2323 (dev_type == PCIXCC_MCL3) ||
2324 (dev_type == CEX2C)) {
2325 if (func == ICARSACRT)
2326 return ICACRT_msg_to_type6CRT_msgX(
2327 (struct ica_rsa_modexpo_crt *) buffer,
2328 cdx, msg_l_p, (struct type6_msg *) msg_p,
2329 dev_type);
2330 else
2331 return ICAMEX_msg_to_type6MEX_msgX(
2332 (struct ica_rsa_modexpo *) buffer,
2333 cdx, msg_l_p, (struct type6_msg *) msg_p,
2334 dev_type);
2335 }
2336 if (dev_type == CEX2A) {
2337 if (func == ICARSACRT)
2338 return ICACRT_msg_to_type50CRT_msg(
2339 (struct ica_rsa_modexpo_crt *) buffer,
2340 msg_l_p, (union type50_msg *) msg_p);
2341 else
2342 return ICAMEX_msg_to_type50MEX_msg(
2343 (struct ica_rsa_modexpo *) buffer,
2344 msg_l_p, (union type50_msg *) msg_p);
2345 }
2346
2347 return 0;
2348}
2349
2350int ext_bitlens_msg_count = 0;
2351static inline void
2352unset_ext_bitlens(void)
2353{
2354 if (!ext_bitlens_msg_count) {
2355 PRINTK("Unable to use coprocessors for extended bitlengths. "
2356 "Using PCICAs/CEX2As (if present) for extended "
2357 "bitlengths. This is not an error.\n");
2358 ext_bitlens_msg_count++;
2359 }
2360 ext_bitlens = 0;
2361}
2362
2363int
2364convert_response(unsigned char *response, unsigned char *buffer,
2365 int *respbufflen_p, unsigned char *resp_buff)
2366{
2367 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
2368 struct error_hdr *errh_p = (struct error_hdr *) response;
2369 struct type80_hdr *t80h_p = (struct type80_hdr *) response;
2370 struct type84_hdr *t84h_p = (struct type84_hdr *) response;
2371 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
2372 int reply_code, service_rc, service_rs, src_l;
2373 unsigned char *src_p, *tgt_p;
2374 struct CPRB *cprb_p;
2375 struct CPRBX *cprbx_p;
2376
2377 src_p = 0;
2378 reply_code = 0;
2379 service_rc = 0;
2380 service_rs = 0;
2381 src_l = 0;
2382 switch (errh_p->type) {
2383 case TYPE82_RSP_CODE:
2384 case TYPE88_RSP_CODE:
2385 reply_code = errh_p->reply_code;
2386 src_p = (unsigned char *)errh_p;
2387 PRINTK("Hardware error: Type %02X Message Header: "
2388 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2389 errh_p->type,
2390 src_p[0], src_p[1], src_p[2], src_p[3],
2391 src_p[4], src_p[5], src_p[6], src_p[7]);
2392 break;
2393 case TYPE80_RSP_CODE:
2394 src_l = icaMsg_p->outputdatalength;
2395 src_p = response + (int)t80h_p->len - src_l;
2396 break;
2397 case TYPE84_RSP_CODE:
2398 src_l = icaMsg_p->outputdatalength;
2399 src_p = response + (int)t84h_p->len - src_l;
2400 break;
2401 case TYPE86_RSP_CODE:
2402 reply_code = t86m_p->header.reply_code;
2403 if (reply_code != 0)
2404 break;
2405 cprb_p = (struct CPRB *)
2406 (response + sizeof(struct type86_fmt2_msg));
2407 cprbx_p = (struct CPRBX *) cprb_p;
2408 if (cprb_p->cprb_ver_id != 0x02) {
2409 le2toI(cprb_p->ccp_rtcode, &service_rc);
2410 if (service_rc != 0) {
2411 le2toI(cprb_p->ccp_rscode, &service_rs);
2412 if ((service_rc == 8) && (service_rs == 66))
2413 PDEBUG("Bad block format on PCICC\n");
2414 else if ((service_rc == 8) && (service_rs == 65))
2415 PDEBUG("Probably an even modulus on "
2416 "PCICC\n");
2417 else if ((service_rc == 8) && (service_rs == 770)) {
2418 PDEBUG("Invalid key length on PCICC\n");
2419 unset_ext_bitlens();
2420 return REC_USE_PCICA;
2421 }
2422 else if ((service_rc == 8) && (service_rs == 783)) {
2423 PDEBUG("Extended bitlengths not enabled "
2424 "on PCICC\n");
2425 unset_ext_bitlens();
2426 return REC_USE_PCICA;
2427 }
2428 else
2429 PRINTK("service rc/rs (PCICC): %d/%d\n",
2430 service_rc, service_rs);
2431 return REC_OPERAND_INV;
2432 }
2433 src_p = (unsigned char *)cprb_p + sizeof(struct CPRB);
2434 src_p += 4;
2435 le2toI(src_p, &src_l);
2436 src_l -= 2;
2437 src_p += 2;
2438 } else {
2439 service_rc = (int)cprbx_p->ccp_rtcode;
2440 if (service_rc != 0) {
2441 service_rs = (int) cprbx_p->ccp_rscode;
2442 if ((service_rc == 8) && (service_rs == 66))
2443 PDEBUG("Bad block format on PCIXCC\n");
2444 else if ((service_rc == 8) && (service_rs == 65))
2445 PDEBUG("Probably an even modulus on "
2446 "PCIXCC\n");
2447 else if ((service_rc == 8) && (service_rs == 770)) {
2448 PDEBUG("Invalid key length on PCIXCC\n");
2449 unset_ext_bitlens();
2450 return REC_USE_PCICA;
2451 }
2452 else if ((service_rc == 8) && (service_rs == 783)) {
2453 PDEBUG("Extended bitlengths not enabled "
2454 "on PCIXCC\n");
2455 unset_ext_bitlens();
2456 return REC_USE_PCICA;
2457 }
2458 else
2459 PRINTK("service rc/rs (PCIXCC): %d/%d\n",
2460 service_rc, service_rs);
2461 return REC_OPERAND_INV;
2462 }
2463 src_p = (unsigned char *)
2464 cprbx_p + sizeof(struct CPRBX);
2465 src_p += 4;
2466 src_l = (int)(*((short *) src_p));
2467 src_l -= 2;
2468 src_p += 2;
2469 }
2470 break;
2471 default:
2472 src_p = (unsigned char *)errh_p;
2473 PRINTK("Unrecognized Message Header: "
2474 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2475 src_p[0], src_p[1], src_p[2], src_p[3],
2476 src_p[4], src_p[5], src_p[6], src_p[7]);
2477 return REC_BAD_MESSAGE;
2478 }
2479
2480 if (reply_code)
2481 switch (reply_code) {
2482 case REP82_ERROR_MACHINE_FAILURE:
2483 if (errh_p->type == TYPE82_RSP_CODE)
2484 PRINTKW("Machine check failure\n");
2485 else
2486 PRINTKW("Module failure\n");
2487 return REC_HARDWAR_ERR;
2488 case REP82_ERROR_OPERAND_INVALID:
2489 return REC_OPERAND_INV;
2490 case REP88_ERROR_MESSAGE_MALFORMD:
2491 PRINTKW("Message malformed\n");
2492 return REC_OPERAND_INV;
2493 case REP82_ERROR_OPERAND_SIZE:
2494 return REC_OPERAND_SIZE;
2495 case REP82_ERROR_EVEN_MOD_IN_OPND:
2496 return REC_EVEN_MOD;
2497 case REP82_ERROR_MESSAGE_TYPE:
2498 return WRONG_DEVICE_TYPE;
2499 case REP82_ERROR_TRANSPORT_FAIL:
2500 PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n",
2501 t86m_p->apfs[0], t86m_p->apfs[1],
2502 t86m_p->apfs[2], t86m_p->apfs[3]);
2503 return REC_HARDWAR_ERR;
2504 default:
2505 PRINTKW("reply code = %d\n", reply_code);
2506 return REC_HARDWAR_ERR;
2507 }
2508
2509 if (service_rc != 0)
2510 return REC_OPERAND_INV;
2511
2512 if ((src_l > icaMsg_p->outputdatalength) ||
2513 (src_l > RESPBUFFSIZE) ||
2514 (src_l <= 0))
2515 return REC_OPERAND_SIZE;
2516
2517 PDEBUG("Length returned = %d\n", src_l);
2518 tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l;
2519 memcpy(tgt_p, src_p, src_l);
2520 if ((errh_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) {
2521 memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l);
2522 if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l))
2523 return REC_INVALID_PAD;
2524 }
2525 *respbufflen_p = icaMsg_p->outputdatalength;
2526 if (*respbufflen_p == 0)
2527 PRINTK("Zero *respbufflen_p\n");
2528
2529 return 0;
2530}
2531
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
deleted file mode 100644
index b2f20ab8431a..000000000000
--- a/drivers/s390/crypto/z90main.c
+++ /dev/null
@@ -1,3379 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90main.c
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h> // copy_(from|to)_user
28#include <linux/compat.h>
29#include <linux/compiler.h>
30#include <linux/delay.h> // mdelay
31#include <linux/init.h>
32#include <linux/interrupt.h> // for tasklets
33#include <linux/miscdevice.h>
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/proc_fs.h>
37#include <linux/syscalls.h>
38#include "z90crypt.h"
39#include "z90common.h"
40
41/**
42 * Defaults that may be modified.
43 */
44
45/**
46 * You can specify a different minor at compile time.
47 */
48#ifndef Z90CRYPT_MINOR
49#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
50#endif
51
52/**
53 * You can specify a different domain at compile time or on the insmod
54 * command line.
55 */
56#ifndef DOMAIN_INDEX
57#define DOMAIN_INDEX -1
58#endif
59
60/**
61 * This is the name under which the device is registered in /proc/modules.
62 */
63#define REG_NAME "z90crypt"
64
65/**
66 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
67 * older than CLEANUPTIME seconds in the past.
68 */
69#ifndef CLEANUPTIME
70#define CLEANUPTIME 15
71#endif
72
73/**
74 * Config should run every CONFIGTIME seconds
75 */
76#ifndef CONFIGTIME
77#define CONFIGTIME 30
78#endif
79
80/**
81 * The first execution of the config task should take place
82 * immediately after initialization
83 */
84#ifndef INITIAL_CONFIGTIME
85#define INITIAL_CONFIGTIME 1
86#endif
87
88/**
89 * Reader should run every READERTIME milliseconds.
90 * With the 100Hz patch for s390, z90crypt can lock the system solid while
91 * under heavy load. We'll try to avoid that.
92 */
93#ifndef READERTIME
94#if HZ > 1000
95#define READERTIME 2
96#else
97#define READERTIME 10
98#endif
99#endif
100
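To make the READERTIME interval above concrete, here is a minimal sketch (not taken from this file; rearm_reader_timer() is a hypothetical helper) of re-arming a reader timer so that it fires again READERTIME milliseconds later, using the standard kernel timer API:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Fire the reader again READERTIME milliseconds from now. */
static void rearm_reader_timer(struct timer_list *timer)
{
	mod_timer(timer, jiffies + msecs_to_jiffies(READERTIME));
}

The driver itself pairs such a timer with a tasklet (see reader_timer and reader_tasklet further down) rather than doing the read work directly in timer context.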
101/**
102 * turn long device array index into device pointer
103 */
104#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
105
106/**
107 * turn short device array index into long device array index
108 */
109#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
110
111/**
112 * turn short device array index into device pointer
113 */
114#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
115
116/**
117 * Status for a work-element
118 */
119#define STAT_DEFAULT 0x00 // request has not been processed
120
121#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
122 // else, device is determined each write
123#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
124 // before being sent to the hardware.
125#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
126// 0x20 // UNUSED state
127#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
128#define STAT_NOWORK 0x00 // bits off: no work on any queue
129#define STAT_RDWRMASK 0x30 // mask for bits 5-4
130
131/**
132 * Macros to check the status RDWRMASK
133 */
134#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
135#define SET_RDWRMASK(statbyte, newval) \
136 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
137
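A short usage sketch of the two macros above (illustrative only; mark_response_pending() is not a function in this file), flipping bits 5-4 of a status byte from STAT_WRITTEN to STAT_READPEND while leaving the other flags untouched:

/* Work is done; the element now waits for its data to be returned. */
static void mark_response_pending(unsigned char *statbyte)
{
	if (CHK_RDWRMASK(*statbyte) == STAT_WRITTEN)
		SET_RDWRMASK(*statbyte, STAT_READPEND);
}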
138/**
139 * Audit Trail. Progress of a Work element
140 * audit[0]: Unless noted otherwise, these bits are all set by the process
141 */
142#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
143#define FP_BUFFREQ 0x40 // Low Level buffer requested
144#define FP_BUFFGOT 0x20 // Low Level buffer obtained
145#define FP_SENT 0x10 // Work element sent to a crypto device
146 // (may be set by process or by reader task)
147#define FP_PENDING 0x08 // Work element placed on pending queue
148 // (may be set by process or by reader task)
149#define FP_REQUEST 0x04 // Work element placed on request queue
150#define FP_ASLEEP 0x02 // Work element about to sleep
151#define FP_AWAKE 0x01 // Work element has been awakened
152
153/**
154 * audit[1]: These bits are set by the reader task and/or the cleanup task
155 */
156#define FP_NOTPENDING 0x80 // Work element removed from pending queue
157#define FP_AWAKENING 0x40 // Caller about to be awakened
158#define FP_TIMEDOUT 0x20 // Caller timed out
159#define FP_RESPSIZESET 0x10 // Response size copied to work element
160#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
161#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
162#define FP_REMREQUEST 0x02 // Work element removed from request queue
163#define FP_SIGNALED 0x01 // Work element was awakened by a signal
164
165/**
166 * audit[2]: unused
167 */
168
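For illustration (the two helpers below are hypothetical, not part of the driver): the submitting process accumulates its progress in audit[0], while the reader and cleanup tasks record theirs in audit[1]:

/* Submitting side: buffer copied in, low-level buffer obtained, queued. */
static void audit_submit_path(unsigned char audit[3])
{
	audit[0] |= FP_COPYFROM | FP_BUFFREQ | FP_BUFFGOT | FP_REQUEST;
}

/* Reader side: element taken off the pending queue, response copied back. */
static void audit_reader_path(unsigned char audit[3])
{
	audit[1] |= FP_NOTPENDING | FP_RESPBUFFCOPIED;
}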
169/**
170 * state of the file handle in private_data.status
171 */
172#define STAT_OPEN 0
173#define STAT_CLOSED 1
174
175/**
176 * PID() expands to the process ID of the current process
177 */
178#define PID() (current->pid)
179
180/**
181 * Selected Constants. The number of APs and the number of devices
182 */
183#ifndef Z90CRYPT_NUM_APS
184#define Z90CRYPT_NUM_APS 64
185#endif
186#ifndef Z90CRYPT_NUM_DEVS
187#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
188#endif
189
190/**
191 * Buffer size for receiving responses. The maximum Response Size
192 * is actually the maximum request size, since in an error condition
193 * the request itself may be returned unchanged.
194 */
195#define MAX_RESPONSE_SIZE 0x0000077C
196
197/**
198 * A count and status-byte mask
199 */
200struct status {
201 int st_count; // # of enabled devices
202 int disabled_count; // # of disabled devices
203 int user_disabled_count; // # of devices disabled via proc fs
204 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
205};
206
207/**
208 * The array of device indexes is a mechanism for fast indexing into
209 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
210 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
211 * z90CDeviceIndex[2] is 47.
212 */
213struct device_x {
214 int device_index[Z90CRYPT_NUM_DEVS];
215};
216
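For illustration (nth_installed_device() is hypothetical), the two-level lookup this index array makes possible, written with the SHRT2LONG and LONG2DEVPTR macros defined above:

/* Resolve the n-th installed AP to its device structure. */
static struct device *nth_installed_device(int n)
{
	int long_ndx = SHRT2LONG(n);	/* n == 1 yields 9 in the example above */

	return LONG2DEVPTR(long_ndx);	/* same result as SHRT2DEVPTR(n) */
}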
217/**
218 * All devices are arranged in a single array: 64 APs
219 */
220struct device {
221 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
222 // PCIXCC_MCL3, CEX2C, CEX2A
223 enum devstat dev_stat; // current device status
224 int dev_self_x; // Index in array
225 int disabled; // Set when device is in error
226 int user_disabled; // Set when device is disabled by user
227 int dev_q_depth; // q depth
228 unsigned char * dev_resp_p; // Response buffer address
229 int dev_resp_l; // Response Buffer length
230 int dev_caller_count; // Number of callers
231 int dev_total_req_cnt; // # requests for device since load
232 struct list_head dev_caller_list; // List of callers
233};
234
235/**
236 * There's a struct status and a struct device_x for each device type.
237 */
238struct hdware_block {
239 struct status hdware_mask;
240 struct status type_mask[Z90CRYPT_NUM_TYPES];
241 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
242 unsigned char device_type_array[Z90CRYPT_NUM_APS];
243};
244
245/**
246 * z90crypt is the topmost data structure in the hierarchy.
247 */
248struct z90crypt {
249 int max_count; // Nr of possible crypto devices
250 struct status mask;
251 int q_depth_array[Z90CRYPT_NUM_DEVS];
252 int dev_type_array[Z90CRYPT_NUM_DEVS];
253 struct device_x overall_device_x; // array device indexes
254 struct device * device_p[Z90CRYPT_NUM_DEVS];
255 int terminating;
256 int domain_established;// TRUE: domain has been found
257 int cdx; // Crypto Domain Index
258 int len; // Length of this data structure
259 struct hdware_block *hdware_info;
260};
261
262/**
263 * An array of these structures is pointed to from dev_caller.
264 * The length of the array depends on the device type. For APs,
265 * there are 8.
266 *
267 * The caller buffer is allocated to the user at OPEN. At WRITE,
268 * it contains the request; at READ, the response. The function
269 * send_to_crypto_device converts the request to device-dependent
270 * form and uses the caller's OPEN-allocated buffer for the response.
271 *
272 * For the contents of caller_dev_dep_req (and caller_dev_dep_req_p,
273 * which points to it), see the discussion in z90hardware.c.
274 * Search for "extended request message block".
275 */
276struct caller {
277 int caller_buf_l; // length of original request
278 unsigned char * caller_buf_p; // Original request on WRITE
279 int caller_dev_dep_req_l; // len device dependent request
280 unsigned char * caller_dev_dep_req_p; // Device dependent form
281 unsigned char caller_id[8]; // caller-supplied message id
282 struct list_head caller_liste;
283 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
284};
285
286/**
287 * Function prototypes from z90hardware.c
288 */
289enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
290 int *dev_type);
291enum devstat reset_device(int deviceNr, int cdx, int resetNr);
292enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
293enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
294 unsigned char *resp, unsigned char *psmid);
295int convert_request(unsigned char *buffer, int func, unsigned short function,
296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
297int convert_response(unsigned char *response, unsigned char *buffer,
298 int *respbufflen_p, unsigned char *resp_buff);
299
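Taken together, these entry points make up the request round trip. The sketch below is an illustration only (roundtrip_sketch(), its parameters and its error mapping are not from this driver, which queues work elements and collects replies from its timer-driven reader task):

#include <linux/errno.h>

/* Convert an ICA mod-expo request, queue it to an AP, fetch and convert the reply. */
static int roundtrip_sketch(int dev_nr, int cdx, int dev_type,
			    unsigned char *ica_req, unsigned char *dev_msg,
			    unsigned char *resp, int resp_max,
			    unsigned char *resp_buff, int *resp_len_p)
{
	unsigned char psmid[8];
	int msg_len = 0, rc;
	enum devstat stat;

	rc = convert_request(ica_req, ICARSAMODEXPO, PCI_FUNC_KEY_ENCRYPT,
			     cdx, dev_type, &msg_len, dev_msg);
	if (rc)
		return rc;	/* one of the SEN_* conversion errors */

	if (send_to_AP(dev_nr, cdx, msg_len, dev_msg) != DEV_ONLINE)
		return -EIO;

	do {	/* the driver polls from its reader task instead of spinning here */
		stat = receive_from_AP(dev_nr, cdx, resp_max, resp, psmid);
	} while (stat == DEV_NO_WORK);
	if (stat != DEV_ONLINE)
		return -EIO;

	/* Copy the device reply back into the caller-sized output buffer. */
	return convert_response(resp, ica_req, resp_len_p, resp_buff);
}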
300/**
301 * Low level function prototypes
302 */
303static int create_z90crypt(int *cdx_p);
304static int refresh_z90crypt(int *cdx_p);
305static int find_crypto_devices(struct status *deviceMask);
306static int create_crypto_device(int index);
307static int destroy_crypto_device(int index);
308static void destroy_z90crypt(void);
309static int refresh_index_array(struct status *status_str,
310 struct device_x *index_array);
311static int probe_device_type(struct device *devPtr);
312static int probe_PCIXCC_type(struct device *devPtr);
313
314/**
315 * proc fs definitions
316 */
317static struct proc_dir_entry *z90crypt_entry;
318
319/**
320 * data structures
321 */
322
323/**
324 * work_element.opener points back to this structure
325 */
326struct priv_data {
327 pid_t opener_pid;
328 unsigned char status; // 0: open 1: closed
329};
330
331/**
332 * A work element is allocated for each request
333 */
334struct work_element {
335 struct priv_data *priv_data;
336 pid_t pid;
337 int devindex; // index of device processing this w_e
338 // (If request did not specify device,
339 // -1 until placed onto a queue)
340 int devtype;
341 struct list_head liste; // used for requestq and pendingq
342 char buffer[128]; // local copy of user request
343 int buff_size; // size of the buffer for the request
344 char resp_buff[RESPBUFFSIZE];
345 int resp_buff_size;
346 char __user * resp_addr; // address of response in user space
347 unsigned int funccode; // function code of request
348 wait_queue_head_t waitq;
349 unsigned long requestsent; // time at which the request was sent
350 atomic_t alarmrung; // wake-up signal
351 unsigned char caller_id[8]; // pid + counter, for this w_e
352 unsigned char status[1]; // bits to mark status of the request
353 unsigned char audit[3]; // record of work element's progress
354 unsigned char * requestptr; // address of request buffer
355 int retcode; // return code of request
356};
357
358/**
359 * High level function prototypes
360 */
361static int z90crypt_open(struct inode *, struct file *);
362static int z90crypt_release(struct inode *, struct file *);
363static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
364static ssize_t z90crypt_write(struct file *, const char __user *,
365 size_t, loff_t *);
366static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
367static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
368
369static void z90crypt_reader_task(unsigned long);
370static void z90crypt_schedule_reader_task(unsigned long);
371static void z90crypt_config_task(unsigned long);
372static void z90crypt_cleanup_task(unsigned long);
373
374static int z90crypt_status(char *, char **, off_t, int, int *, void *);
375static int z90crypt_status_write(struct file *, const char __user *,
376 unsigned long, void *);
377
378/**
379 * Storage allocated at initialization and used throughout the life of
380 * this insmod
381 */
382static int domain = DOMAIN_INDEX;
383static struct z90crypt z90crypt;
384static int quiesce_z90crypt;
385static spinlock_t queuespinlock;
386static struct list_head request_list;
387static int requestq_count;
388static struct list_head pending_list;
389static int pendingq_count;
390
391static struct tasklet_struct reader_tasklet;
392static struct timer_list reader_timer;
393static struct timer_list config_timer;
394static struct timer_list cleanup_timer;
395static atomic_t total_open;
396static atomic_t z90crypt_step;
397
398static struct file_operations z90crypt_fops = {
399 .owner = THIS_MODULE,
400 .read = z90crypt_read,
401 .write = z90crypt_write,
402 .unlocked_ioctl = z90crypt_unlocked_ioctl,
403#ifdef CONFIG_COMPAT
404 .compat_ioctl = z90crypt_compat_ioctl,
405#endif
406 .open = z90crypt_open,
407 .release = z90crypt_release
408};
409
410static struct miscdevice z90crypt_misc_device = {
411 .minor = Z90CRYPT_MINOR,
412 .name = DEV_NAME,
413 .fops = &z90crypt_fops,
414};
415
416/**
417 * Documentation values.
418 */
419MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman "
420 "and Jochen Roehrig");
421MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
422 "Copyright 2001, 2005 IBM Corporation");
423MODULE_LICENSE("GPL");
424module_param(domain, int, 0);
425MODULE_PARM_DESC(domain, "domain index for device");
426
427#ifdef CONFIG_COMPAT
428/**
429 * ioctl32 conversion routines
430 */
431struct ica_rsa_modexpo_32 { // For 32-bit callers
432 compat_uptr_t inputdata;
433 unsigned int inputdatalength;
434 compat_uptr_t outputdata;
435 unsigned int outputdatalength;
436 compat_uptr_t b_key;
437 compat_uptr_t n_modulus;
438};
439
440static long
441trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
442{
443 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
444 struct ica_rsa_modexpo_32 mex32k;
445 struct ica_rsa_modexpo __user *mex64;
446 long ret = 0;
447 unsigned int i;
448
449 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
450 return -EFAULT;
451 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
452 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
453 return -EFAULT;
454 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
455 return -EFAULT;
456 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
457 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
458 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
459 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
460 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
461 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
462 return -EFAULT;
463 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
464 if (!ret)
465 if (__get_user(i, &mex64->outputdatalength) ||
466 __put_user(i, &mex32u->outputdatalength))
467 ret = -EFAULT;
468 return ret;
469}
470
471struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
472 compat_uptr_t inputdata;
473 unsigned int inputdatalength;
474 compat_uptr_t outputdata;
475 unsigned int outputdatalength;
476 compat_uptr_t bp_key;
477 compat_uptr_t bq_key;
478 compat_uptr_t np_prime;
479 compat_uptr_t nq_prime;
480 compat_uptr_t u_mult_inv;
481};
482
483static long
484trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
485{
486 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
487 struct ica_rsa_modexpo_crt_32 crt32k;
488 struct ica_rsa_modexpo_crt __user *crt64;
489 long ret = 0;
490 unsigned int i;
491
492 if (!access_ok(VERIFY_WRITE, crt32u,
493 sizeof(struct ica_rsa_modexpo_crt_32)))
494 return -EFAULT;
495 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
496 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
497 return -EFAULT;
498 if (copy_from_user(&crt32k, crt32u,
499 sizeof(struct ica_rsa_modexpo_crt_32)))
500 return -EFAULT;
501 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
502 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
503 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
504 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
505 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
506 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
507 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
508 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
509 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
510 return -EFAULT;
511 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
512 if (!ret)
513 if (__get_user(i, &crt64->outputdatalength) ||
514 __put_user(i, &crt32u->outputdatalength))
515 ret = -EFAULT;
516 return ret;
517}
518
519static long
520z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
521{
522 switch (cmd) {
523 case ICAZ90STATUS:
524 case Z90QUIESCE:
525 case Z90STAT_TOTALCOUNT:
526 case Z90STAT_PCICACOUNT:
527 case Z90STAT_PCICCCOUNT:
528 case Z90STAT_PCIXCCCOUNT:
529 case Z90STAT_PCIXCCMCL2COUNT:
530 case Z90STAT_PCIXCCMCL3COUNT:
531	case Z90STAT_CEX2CCOUNT: case Z90STAT_CEX2ACOUNT:
532 case Z90STAT_REQUESTQ_COUNT:
533 case Z90STAT_PENDINGQ_COUNT:
534 case Z90STAT_TOTALOPEN_COUNT:
535 case Z90STAT_DOMAIN_INDEX:
536 case Z90STAT_STATUS_MASK:
537 case Z90STAT_QDEPTH_MASK:
538 case Z90STAT_PERDEV_REQCNT:
539 return z90crypt_unlocked_ioctl(filp, cmd, arg);
540 case ICARSAMODEXPO:
541 return trans_modexpo32(filp, cmd, arg);
542 case ICARSACRT:
543 return trans_modexpo_crt32(filp, cmd, arg);
544 default:
545 return -ENOIOCTLCMD;
546 }
547}
548#endif
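
/*
 * Illustrative sketch (not part of the driver): how a 64-bit user-space
 * caller might drive the same ICARSAMODEXPO ioctl that the conversion
 * routines above repack for 32-bit callers.  The device path
 * /dev/z90crypt, the availability of z90crypt.h in user space, and the
 * helper name modexpo_example are assumptions for illustration only;
 * error handling is minimal.
 */
#if 0	/* user-space example, never compiled into the driver */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "z90crypt.h"

static int modexpo_example(unsigned char *msg, unsigned char *out,
			   unsigned char *exponent, unsigned char *modulus,
			   unsigned int len)
{
	struct ica_rsa_modexpo mex;
	int fd, rc;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return -1;
	memset(&mex, 0, sizeof(mex));
	mex.inputdata = (char *) msg;		/* len == modulus size in bytes */
	mex.inputdatalength = len;
	mex.outputdata = (char *) out;
	mex.outputdatalength = len;
	mex.b_key = (char *) exponent;
	mex.n_modulus = (char *) modulus;
	rc = ioctl(fd, ICARSAMODEXPO, &mex);	/* 0 on success */
	close(fd);
	return rc;
}
#endif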
549
550/**
551 * The module initialization code.
552 */
553static int __init
554z90crypt_init_module(void)
555{
556 int result, nresult;
557 struct proc_dir_entry *entry;
558
559 PDEBUG("PID %d\n", PID());
560
561 if ((domain < -1) || (domain > 15)) {
562 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
563 return -EINVAL;
564 }
565
566 /* Register as misc device with given minor (or get a dynamic one). */
567 result = misc_register(&z90crypt_misc_device);
568 if (result < 0) {
569		PRINTKW("misc_register (minor %d) failed with %d\n",
570 z90crypt_misc_device.minor, result);
571 return result;
572 }
573
574 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
575
576 result = create_z90crypt(&domain);
577 if (result != 0) {
578 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
579 domain, result);
580 result = -ENOMEM;
581 goto init_module_cleanup;
582 }
583
584	/*
585	 * result == 0 at this point; the failure path above has already
586	 * jumped to init_module_cleanup.
587	 */
588	PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
589		z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
590		__DATE__, __TIME__);
591	PDEBUG("create_z90crypt (domain index %d) successful.\n", domain);
592
593 /* Initialize globals. */
594 spin_lock_init(&queuespinlock);
595
596 INIT_LIST_HEAD(&pending_list);
597 pendingq_count = 0;
598
599 INIT_LIST_HEAD(&request_list);
600 requestq_count = 0;
601
602 quiesce_z90crypt = 0;
603
604 atomic_set(&total_open, 0);
605 atomic_set(&z90crypt_step, 0);
606
607 /* Set up the cleanup task. */
608 init_timer(&cleanup_timer);
609 cleanup_timer.function = z90crypt_cleanup_task;
610 cleanup_timer.data = 0;
611 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
612 add_timer(&cleanup_timer);
613
614 /* Set up the proc file system */
615 entry = create_proc_entry("driver/z90crypt", 0644, 0);
616 if (entry) {
617 entry->nlink = 1;
618 entry->data = 0;
619 entry->read_proc = z90crypt_status;
620 entry->write_proc = z90crypt_status_write;
621	} else
622		PRINTK("Couldn't create z90crypt proc entry\n");
623
624 z90crypt_entry = entry;
625
626 /* Set up the configuration task. */
627 init_timer(&config_timer);
628 config_timer.function = z90crypt_config_task;
629 config_timer.data = 0;
630 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
631 add_timer(&config_timer);
632
633 /* Set up the reader task */
634 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
635 init_timer(&reader_timer);
636 reader_timer.function = z90crypt_schedule_reader_task;
637 reader_timer.data = 0;
638 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
639 add_timer(&reader_timer);
640
641 return 0; // success
642
643init_module_cleanup:
644 if ((nresult = misc_deregister(&z90crypt_misc_device)))
645 PRINTK("misc_deregister failed with %d.\n", nresult);
646 else
647 PDEBUG("misc_deregister successful.\n");
648
649 return result; // failure
650}
651
652/**
653 * The module termination code
654 */
655static void __exit
656z90crypt_cleanup_module(void)
657{
658 int nresult;
659
660 PDEBUG("PID %d\n", PID());
661
662 remove_proc_entry("driver/z90crypt", 0);
663
664 if ((nresult = misc_deregister(&z90crypt_misc_device)))
665 PRINTK("misc_deregister failed with %d.\n", nresult);
666 else
667 PDEBUG("misc_deregister successful.\n");
668
669 /* Remove the tasks */
670 tasklet_kill(&reader_tasklet);
671 del_timer(&reader_timer);
672 del_timer(&config_timer);
673 del_timer(&cleanup_timer);
674
675 destroy_z90crypt();
676
677 PRINTKN("Unloaded.\n");
678}
679
680/**
681 * Functions running under a process id
682 *
683 * The I/O functions:
684 * z90crypt_open
685 * z90crypt_release
686 * z90crypt_read
687 * z90crypt_write
688 * z90crypt_unlocked_ioctl
689 * z90crypt_status
690 * z90crypt_status_write
691 * disable_card
692 * enable_card
693 *
694 * Helper functions:
695 * z90crypt_rsa
696 * z90crypt_prepare
697 * z90crypt_send
698 * z90crypt_process_results
699 *
700 */
701static int
702z90crypt_open(struct inode *inode, struct file *filp)
703{
704 struct priv_data *private_data_p;
705
706 if (quiesce_z90crypt)
707 return -EQUIESCE;
708
709 private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL);
710 if (!private_data_p) {
711 PRINTK("Memory allocate failed\n");
712 return -ENOMEM;
713 }
714
715 private_data_p->status = STAT_OPEN;
716 private_data_p->opener_pid = PID();
717 filp->private_data = private_data_p;
718 atomic_inc(&total_open);
719
720 return 0;
721}
722
723static int
724z90crypt_release(struct inode *inode, struct file *filp)
725{
726 struct priv_data *private_data_p = filp->private_data;
727
728 PDEBUG("PID %d (filp %p)\n", PID(), filp);
729
730 private_data_p->status = STAT_CLOSED;
731 memset(private_data_p, 0, sizeof(struct priv_data));
732 kfree(private_data_p);
733 atomic_dec(&total_open);
734
735 return 0;
736}
737
738/*
739 * There are two read functions; a compile-time option selects which one
740 * is built.  Without USE_GET_RANDOM_BYTES
741 *   => read() always returns -EPERM;
742 * otherwise
743 *   => read() uses the get_random_bytes() kernel function
744 */
745#ifndef USE_GET_RANDOM_BYTES
746/**
747 * z90crypt_read will not be supported beyond z90crypt 1.3.1
748 */
749static ssize_t
750z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
751{
752 PDEBUG("filp %p (PID %d)\n", filp, PID());
753 return -EPERM;
754}
755#else // we want to use get_random_bytes
756/**
757 * read() just returns a string of random bytes. Since we have no way
758 * to generate these on the cryptographic hardware, we simply call
759 * get_random_bytes() for the length specified.
760 */
761#include <linux/random.h>
762static ssize_t
763z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
764{
765 unsigned char *temp_buff;
766
767 PDEBUG("filp %p (PID %d)\n", filp, PID());
768
769 if (quiesce_z90crypt)
770 return -EQUIESCE;
771	/*
772	 * count is a size_t and thus cannot be negative; only the upper
773	 * bound needs checking here.
774	 */
775	if (count > RESPBUFFSIZE) {
776		PDEBUG("count[%zu] > RESPBUFFSIZE\n", count);
777		return -EINVAL;
778	}
779 if (count == 0)
780 return 0;
781 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
782 if (!temp_buff) {
783 PRINTK("Memory allocate failed\n");
784 return -ENOMEM;
785 }
786 get_random_bytes(temp_buff, count);
787
788 if (copy_to_user(buf, temp_buff, count) != 0) {
789 kfree(temp_buff);
790 return -EFAULT;
791 }
792 kfree(temp_buff);
793 return count;
794}
795#endif
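
/*
 * Illustrative sketch (not part of the driver): reading random bytes
 * through the optional USE_GET_RANDOM_BYTES read() path above.  When the
 * driver is built without that option, read() always returns -EPERM.
 * The device path /dev/z90crypt and the helper name are assumptions for
 * illustration only.
 */
#if 0	/* user-space example, never compiled into the driver */
#include <fcntl.h>
#include <unistd.h>

static int read_random_example(unsigned char *buf, size_t want)
{
	int fd;
	ssize_t got;

	fd = open("/dev/z90crypt", O_RDONLY);
	if (fd < 0)
		return -1;
	got = read(fd, buf, want);	/* want must not exceed RESPBUFFSIZE */
	close(fd);
	return got < 0 ? -1 : (int) got;
}
#endif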
796
797/**
798 * Write is not allowed
799 */
800static ssize_t
801z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
802{
803 PDEBUG("filp %p (PID %d)\n", filp, PID());
804 return -EPERM;
805}
806
807/**
808 * New status functions
809 */
810static inline int
811get_status_totalcount(void)
812{
813 return z90crypt.hdware_info->hdware_mask.st_count;
814}
815
816static inline int
817get_status_PCICAcount(void)
818{
819 return z90crypt.hdware_info->type_mask[PCICA].st_count;
820}
821
822static inline int
823get_status_PCICCcount(void)
824{
825 return z90crypt.hdware_info->type_mask[PCICC].st_count;
826}
827
828static inline int
829get_status_PCIXCCcount(void)
830{
831 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
832 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
833}
834
835static inline int
836get_status_PCIXCCMCL2count(void)
837{
838 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
839}
840
841static inline int
842get_status_PCIXCCMCL3count(void)
843{
844 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
845}
846
847static inline int
848get_status_CEX2Ccount(void)
849{
850 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
851}
852
853static inline int
854get_status_CEX2Acount(void)
855{
856 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
857}
858
859static inline int
860get_status_requestq_count(void)
861{
862 return requestq_count;
863}
864
865static inline int
866get_status_pendingq_count(void)
867{
868 return pendingq_count;
869}
870
871static inline int
872get_status_totalopen_count(void)
873{
874 return atomic_read(&total_open);
875}
876
877static inline int
878get_status_domain_index(void)
879{
880 return z90crypt.cdx;
881}
882
883static inline unsigned char *
884get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
885{
886 int i, ix;
887
888 memcpy(status, z90crypt.hdware_info->device_type_array,
889 Z90CRYPT_NUM_APS);
890
891 for (i = 0; i < get_status_totalcount(); i++) {
892 ix = SHRT2LONG(i);
893 if (LONG2DEVPTR(ix)->user_disabled)
894 status[ix] = 0x0d;
895 }
896
897 return status;
898}
899
900static inline unsigned char *
901get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
902{
903 int i, ix;
904
905 memset(qdepth, 0, Z90CRYPT_NUM_APS);
906
907 for (i = 0; i < get_status_totalcount(); i++) {
908 ix = SHRT2LONG(i);
909 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
910 }
911
912 return qdepth;
913}
914
915static inline unsigned int *
916get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
917{
918 int i, ix;
919
920 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
921
922 for (i = 0; i < get_status_totalcount(); i++) {
923 ix = SHRT2LONG(i);
924 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
925 }
926
927 return reqcnt;
928}
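
/*
 * Illustrative sketch (not part of the driver): user space reads these
 * counters through the Z90STAT_* ioctls handled further below.  In the
 * status mask (Z90CRYPT_NUM_APS = 64 bytes here), device types are
 * reported as in the /proc legend (1=PCICA 2=PCICC 3=PCIXCC(MCL2)
 * 4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A) and 0x0d marks a device disabled by
 * the user.  The device path, the user-space header name, and the helper
 * name are assumptions for illustration only.
 */
#if 0	/* user-space example, never compiled into the driver */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "z90crypt.h"

static int status_mask_example(unsigned char mask[64])
{
	int fd, total = 0;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return -1;
	if (ioctl(fd, Z90STAT_TOTALCOUNT, &total) == 0 &&
	    ioctl(fd, Z90STAT_STATUS_MASK, mask) == 0) {
		close(fd);
		return total;	/* number of devices currently detected */
	}
	close(fd);
	return -1;
}
#endif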
929
930static inline void
931init_work_element(struct work_element *we_p,
932 struct priv_data *priv_data, pid_t pid)
933{
934 int step;
935
936 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
937 /* Come up with a unique id for this caller. */
938 step = atomic_inc_return(&z90crypt_step);
939 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
940 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
941 we_p->pid = pid;
942 we_p->priv_data = priv_data;
943 we_p->status[0] = STAT_DEFAULT;
944 we_p->audit[0] = 0x00;
945 we_p->audit[1] = 0x00;
946 we_p->audit[2] = 0x00;
947 we_p->resp_buff_size = 0;
948 we_p->retcode = 0;
949 we_p->devindex = -1;
950 we_p->devtype = -1;
951 atomic_set(&we_p->alarmrung, 0);
952 init_waitqueue_head(&we_p->waitq);
953 INIT_LIST_HEAD(&(we_p->liste));
954}
955
956static inline int
957allocate_work_element(struct work_element **we_pp,
958 struct priv_data *priv_data_p, pid_t pid)
959{
960 struct work_element *we_p;
961
962 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
963 if (!we_p)
964 return -ENOMEM;
965 init_work_element(we_p, priv_data_p, pid);
966 *we_pp = we_p;
967 return 0;
968}
969
970static inline void
971remove_device(struct device *device_p)
972{
973 if (!device_p || (device_p->disabled != 0))
974 return;
975 device_p->disabled = 1;
976 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
977 z90crypt.hdware_info->hdware_mask.disabled_count++;
978}
979
980/**
981 * Bitlength limits for each card
982 *
983 * There are new MCLs which allow more bitlengths. See the table for details.
984 * The MCL must be applied and the newer bitlengths enabled for these to work.
985 *
986 * Card Type Old limit New limit
987 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
988 * PCICC 512-1024 512-2048
989 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
990 * PCIXCC_MCL3 ----- 128-2048
991 * CEX2C 512-2048 128-2048
992 * CEX2A ??-2048 same (the lower limit is less than 128 bit...)
993 *
994 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
995 * MCL to just one card in a machine. We assume, at first, that all cards have
996 * these capabilities.
997 */
998int ext_bitlens = 1; // This is global
999#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1000#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1001#define PCICC_MIN_MOD_SIZE 64 // 512 bits
1002#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1003#define MAX_MOD_SIZE 256 // 2048 bits
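
/*
 * Illustrative sketch (not part of the driver logic): the limits above are
 * modulus lengths in bytes, so a key size in bits maps onto them by
 * dividing by 8 (e.g. 2048 bits -> 256 bytes == MAX_MOD_SIZE).  A rough
 * per-type range check mirroring the PCICC row of the table might look
 * like the unused helper below; select_device_type() that follows is the
 * authoritative implementation used by the driver.
 */
static inline int pcicc_mod_len_ok(int bytelength, int extended)
{
	/* PCICC: 512-1024 bits originally, 512-2048 bits with the new MCL */
	return (bytelength >= PCICC_MIN_MOD_SIZE) &&
	       (bytelength <= (extended ? MAX_MOD_SIZE :
					  OLD_PCICC_MAX_MOD_SIZE));
}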
1004
1005static inline int
1006select_device_type(int *dev_type_p, int bytelength)
1007{
1008 static int count = 0;
1009 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
1010 index_to_use;
1011 struct status *stat;
1012 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1013 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1014 (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
1015 (*dev_type_p != ANYDEV))
1016 return -1;
1017 if (*dev_type_p != ANYDEV) {
1018 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1019 if (stat->st_count >
1020 (stat->disabled_count + stat->user_disabled_count))
1021 return 0;
1022 return -1;
1023 }
1024
1025 /**
1026 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
1027 * speed.
1028 *
1029 * PCICA and CEX2A do NOT co-exist, so it would be either one or the
1030 * other present.
1031 */
1032 stat = &z90crypt.hdware_info->type_mask[PCICA];
1033 PCICA_avail = stat->st_count -
1034 (stat->disabled_count + stat->user_disabled_count);
1035 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
1036 PCIXCC_MCL3_avail = stat->st_count -
1037 (stat->disabled_count + stat->user_disabled_count);
1038 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1039 CEX2C_avail = stat->st_count -
1040 (stat->disabled_count + stat->user_disabled_count);
1041 stat = &z90crypt.hdware_info->type_mask[CEX2A];
1042 CEX2A_avail = stat->st_count -
1043 (stat->disabled_count + stat->user_disabled_count);
1044 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
1045 /**
1046 * bitlength is a factor, PCICA or CEX2A are the most capable,
1047 * even with the new MCL for PCIXCC.
1048 */
1049 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1050 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1051 if (PCICA_avail) {
1052 *dev_type_p = PCICA;
1053 return 0;
1054 }
1055 if (CEX2A_avail) {
1056 *dev_type_p = CEX2A;
1057 return 0;
1058 }
1059 return -1;
1060 }
1061
1062 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1063 CEX2C_avail + CEX2A_avail);
1064 if (index_to_use < PCICA_avail)
1065 *dev_type_p = PCICA;
1066 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1067 *dev_type_p = PCIXCC_MCL3;
1068 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
1069 CEX2C_avail))
1070 *dev_type_p = CEX2C;
1071 else
1072 *dev_type_p = CEX2A;
1073 count++;
1074 return 0;
1075 }
1076
1077 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1078 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
1079 return -1;
1080 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
1081 if (stat->st_count >
1082 (stat->disabled_count + stat->user_disabled_count)) {
1083 *dev_type_p = PCIXCC_MCL2;
1084 return 0;
1085 }
1086
1087 /**
1088 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1089 * (if we don't have the MCL applied and the newer bitlengths enabled)
1090 * cannot go to a PCICC
1091 */
1092 if ((bytelength < PCICC_MIN_MOD_SIZE) ||
1093 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
1094 return -1;
1095 }
1096 stat = &z90crypt.hdware_info->type_mask[PCICC];
1097 if (stat->st_count >
1098 (stat->disabled_count + stat->user_disabled_count)) {
1099 *dev_type_p = PCICC;
1100 return 0;
1101 }
1102
1103 return -1;
1104}
1105
1106/**
1107 * Try the selected number, then the selected type (can be ANYDEV)
1108 */
1109static inline int
1110select_device(int *dev_type_p, int *device_nr_p, int bytelength)
1111{
1112 int i, indx, devTp, low_count, low_indx;
1113 struct device_x *index_p;
1114 struct device *dev_ptr;
1115
1116 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1117 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1118 PDEBUG("trying index = %d\n", *device_nr_p);
1119 dev_ptr = z90crypt.device_p[*device_nr_p];
1120
1121 if (dev_ptr &&
1122 (dev_ptr->dev_stat != DEV_GONE) &&
1123 (dev_ptr->disabled == 0) &&
1124 (dev_ptr->user_disabled == 0)) {
1125 PDEBUG("selected by number, index = %d\n",
1126 *device_nr_p);
1127 *dev_type_p = dev_ptr->dev_type;
1128 return *device_nr_p;
1129 }
1130 }
1131 *device_nr_p = -1;
1132 PDEBUG("trying type = %d\n", *dev_type_p);
1133 devTp = *dev_type_p;
1134 if (select_device_type(&devTp, bytelength) == -1) {
1135 PDEBUG("failed to select by type\n");
1136 return -1;
1137 }
1138 PDEBUG("selected type = %d\n", devTp);
1139 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
1140 low_count = 0x0000FFFF;
1141 low_indx = -1;
1142 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1143 indx = index_p->device_index[i];
1144 dev_ptr = z90crypt.device_p[indx];
1145 if (dev_ptr &&
1146 (dev_ptr->dev_stat != DEV_GONE) &&
1147 (dev_ptr->disabled == 0) &&
1148 (dev_ptr->user_disabled == 0) &&
1149 (devTp == dev_ptr->dev_type) &&
1150 (low_count > dev_ptr->dev_caller_count)) {
1151 low_count = dev_ptr->dev_caller_count;
1152 low_indx = indx;
1153 }
1154 }
1155 *device_nr_p = low_indx;
1156 return low_indx;
1157}
1158
1159static inline int
1160send_to_crypto_device(struct work_element *we_p)
1161{
1162 struct caller *caller_p;
1163 struct device *device_p;
1164 int dev_nr;
1165 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
1166
1167 if (!we_p->requestptr)
1168 return SEN_FATAL_ERROR;
1169 caller_p = (struct caller *)we_p->requestptr;
1170 dev_nr = we_p->devindex;
1171 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
1172 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1173 return SEN_RETRY;
1174 else
1175 return SEN_NOT_AVAIL;
1176 }
1177 we_p->devindex = dev_nr;
1178 device_p = z90crypt.device_p[dev_nr];
1179 if (!device_p)
1180 return SEN_NOT_AVAIL;
1181 if (device_p->dev_type != we_p->devtype)
1182 return SEN_RETRY;
1183 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1184 return SEN_QUEUE_FULL;
1185 PDEBUG("device number prior to send: %d\n", dev_nr);
1186 switch (send_to_AP(dev_nr, z90crypt.cdx,
1187 caller_p->caller_dev_dep_req_l,
1188 caller_p->caller_dev_dep_req_p)) {
1189 case DEV_SEN_EXCEPTION:
1190 PRINTKC("Exception during send to device %d\n", dev_nr);
1191 z90crypt.terminating = 1;
1192 return SEN_FATAL_ERROR;
1193 case DEV_GONE:
1194 PRINTK("Device %d not available\n", dev_nr);
1195 remove_device(device_p);
1196 return SEN_NOT_AVAIL;
1197 case DEV_EMPTY:
1198 return SEN_NOT_AVAIL;
1199 case DEV_NO_WORK:
1200 return SEN_FATAL_ERROR;
1201 case DEV_BAD_MESSAGE:
1202 return SEN_USER_ERROR;
1203 case DEV_QUEUE_FULL:
1204 return SEN_QUEUE_FULL;
1205 default:
1206 case DEV_ONLINE:
1207 break;
1208 }
1209 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1210 device_p->dev_caller_count++;
1211 return 0;
1212}
1213
1214/**
1215 * Send puts the user's work on one of two queues:
1216 * the pending queue if the send was successful
1217 * the request queue if the send failed because device full or busy
1218 */
1219static inline int
1220z90crypt_send(struct work_element *we_p, const char *buf)
1221{
1222 int rv;
1223
1224 PDEBUG("PID %d\n", PID());
1225
1226 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1227 PDEBUG("PID %d tried to send more work but has outstanding "
1228 "work.\n", PID());
1229 return -EWORKPEND;
1230 }
1231 we_p->devindex = -1; // Reset device number
1232 spin_lock_irq(&queuespinlock);
1233 rv = send_to_crypto_device(we_p);
1234 switch (rv) {
1235 case 0:
1236 we_p->requestsent = jiffies;
1237 we_p->audit[0] |= FP_SENT;
1238 list_add_tail(&we_p->liste, &pending_list);
1239 ++pendingq_count;
1240 we_p->audit[0] |= FP_PENDING;
1241 break;
1242 case SEN_BUSY:
1243 case SEN_QUEUE_FULL:
1244 rv = 0;
1245 we_p->devindex = -1; // any device will do
1246 we_p->requestsent = jiffies;
1247 list_add_tail(&we_p->liste, &request_list);
1248 ++requestq_count;
1249 we_p->audit[0] |= FP_REQUEST;
1250 break;
1251 case SEN_RETRY:
1252 rv = -ERESTARTSYS;
1253 break;
1254 case SEN_NOT_AVAIL:
1255 PRINTK("*** No devices available.\n");
1256 rv = we_p->retcode = -ENODEV;
1257 we_p->status[0] |= STAT_FAILED;
1258 break;
1259 case REC_OPERAND_INV:
1260 case REC_OPERAND_SIZE:
1261 case REC_EVEN_MOD:
1262 case REC_INVALID_PAD:
1263 rv = we_p->retcode = -EINVAL;
1264 we_p->status[0] |= STAT_FAILED;
1265 break;
1266 default:
1267 we_p->retcode = rv;
1268 we_p->status[0] |= STAT_FAILED;
1269 break;
1270 }
1271 if (rv != -ERESTARTSYS)
1272 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1273 spin_unlock_irq(&queuespinlock);
1274 if (rv == 0)
1275 tasklet_schedule(&reader_tasklet);
1276 return rv;
1277}
1278
1279/**
1280 * process_results copies the user's work from kernel space.
1281 */
1282static inline int
1283z90crypt_process_results(struct work_element *we_p, char __user *buf)
1284{
1285 int rv;
1286
1287 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1288
1289 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1290 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1291
1292 rv = 0;
1293 if (!we_p->buffer) {
1294 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1295 we_p, PID());
1296 rv = -ENOBUFF;
1297 }
1298
1299 if (!rv)
1300 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1301 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1302 rv = -EFAULT;
1303 }
1304
1305 if (!rv)
1306 rv = we_p->retcode;
1307 if (!rv)
1308 if (we_p->resp_buff_size
1309 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1310 we_p->resp_buff_size))
1311 rv = -EFAULT;
1312
1313 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
1314 return rv;
1315}
1316
1317static unsigned char NULL_psmid[8] =
1318{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1319
1320/**
1321 * Used in device configuration functions
1322 */
1323#define MAX_RESET 90
1324
1325/**
1326 * This is used only for PCICC support
1327 */
1328static inline int
1329is_PKCS11_padded(unsigned char *buffer, int length)
1330{
1331 int i;
1332 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
1333 return 0;
1334 for (i = 2; i < length; i++)
1335 if (buffer[i] != 0xFF)
1336 break;
1337 if ((i < 10) || (i == length))
1338 return 0;
1339 if (buffer[i] != 0x00)
1340 return 0;
1341 return 1;
1342}
1343
1344/**
1345 * This is used only for PCICC support
1346 */
1347static inline int
1348is_PKCS12_padded(unsigned char *buffer, int length)
1349{
1350 int i;
1351 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
1352 return 0;
1353 for (i = 2; i < length; i++)
1354 if (buffer[i] == 0x00)
1355 break;
1356 if ((i < 10) || (i == length))
1357 return 0;
1358 if (buffer[i] != 0x00)
1359 return 0;
1360 return 1;
1361}
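
/*
 * Illustrative sketch (not part of the driver): example buffers that the
 * two padding checks above accept.  "PKCS11" here means a PKCS #1 block
 * type 01 frame (0x00 0x01, at least eight 0xFF bytes, a 0x00 separator,
 * then data) and "PKCS12" a block type 02 frame (0x00 0x02, at least
 * eight non-zero random bytes, 0x00, then data).  The payload bytes are
 * arbitrary placeholder values.
 */
#if 0	/* example data, never compiled into the driver */
static const unsigned char bt01_example[16] = {
	0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0x00, 'd', 'a', 't', 'a', '!'
};	/* is_PKCS11_padded(bt01_example, 16) returns 1 */

static const unsigned char bt02_example[16] = {
	0x00, 0x02, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66,
	0x77, 0x88, 0x00, 'd', 'a', 't', 'a', '!'
};	/* is_PKCS12_padded(bt02_example, 16) returns 1 */
#endif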
1362
1363/**
1364 * builds struct caller and converts message from generic format to
1365 * device-dependent format
1366 * func is ICARSAMODEXPO or ICARSACRT
1367 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1368 */
1369static inline int
1370build_caller(struct work_element *we_p, short function)
1371{
1372 int rv;
1373 struct caller *caller_p = (struct caller *)we_p->requestptr;
1374
1375 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1376 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1377 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
1378 return SEN_NOT_AVAIL;
1379
1380 memcpy(caller_p->caller_id, we_p->caller_id,
1381 sizeof(caller_p->caller_id));
1382 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1383 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1384 caller_p->caller_buf_p = we_p->buffer;
1385 INIT_LIST_HEAD(&(caller_p->caller_liste));
1386
1387 rv = convert_request(we_p->buffer, we_p->funccode, function,
1388 z90crypt.cdx, we_p->devtype,
1389 &caller_p->caller_dev_dep_req_l,
1390 caller_p->caller_dev_dep_req_p);
1391 if (rv) {
1392 if (rv == SEN_NOT_AVAIL)
1393 PDEBUG("request can't be processed on hdwr avail\n");
1394 else
1395 PRINTK("Error from convert_request: %d\n", rv);
1396	} else
1397		memcpy(&(caller_p->caller_dev_dep_req_p[4]),
1398		       we_p->caller_id, 8);
1399 return rv;
1400}
1401
1402static inline void
1403unbuild_caller(struct device *device_p, struct caller *caller_p)
1404{
1405 if (!caller_p)
1406 return;
1407 if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1408 if (!list_empty(&caller_p->caller_liste)) {
1409 list_del_init(&caller_p->caller_liste);
1410 device_p->dev_caller_count--;
1411 }
1412 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
1413}
1414
1415static inline int
1416get_crypto_request_buffer(struct work_element *we_p)
1417{
1418 struct ica_rsa_modexpo *mex_p;
1419 struct ica_rsa_modexpo_crt *crt_p;
1420 unsigned char *temp_buffer;
1421 short function;
1422 int rv;
1423
1424 mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
1425 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1426
1427 PDEBUG("device type input = %d\n", we_p->devtype);
1428
1429 if (z90crypt.terminating)
1430 return REC_NO_RESPONSE;
1431 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1432 PRINTK("psmid zeroes\n");
1433 return SEN_FATAL_ERROR;
1434 }
1435 if (!we_p->buffer) {
1436 PRINTK("buffer pointer NULL\n");
1437 return SEN_USER_ERROR;
1438 }
1439 if (!we_p->requestptr) {
1440 PRINTK("caller pointer NULL\n");
1441 return SEN_USER_ERROR;
1442 }
1443
1444 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1445 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1446 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
1447 (we_p->devtype != ANYDEV)) {
1448 PRINTK("invalid device type\n");
1449 return SEN_USER_ERROR;
1450 }
1451
1452 if ((mex_p->inputdatalength < 1) ||
1453 (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1454 PRINTK("inputdatalength[%d] is not valid\n",
1455 mex_p->inputdatalength);
1456 return SEN_USER_ERROR;
1457 }
1458
1459 if (mex_p->outputdatalength < mex_p->inputdatalength) {
1460 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1461 mex_p->outputdatalength, mex_p->inputdatalength);
1462 return SEN_USER_ERROR;
1463 }
1464
1465 if (!mex_p->inputdata || !mex_p->outputdata) {
1466 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1467		       mex_p->inputdata, mex_p->outputdata);
1468 return SEN_USER_ERROR;
1469 }
1470
1471 /**
1472 * As long as outputdatalength is big enough, we can set the
1473 * outputdatalength equal to the inputdatalength, since that is the
1474 * number of bytes we will copy in any case
1475 */
1476 mex_p->outputdatalength = mex_p->inputdatalength;
1477
1478 rv = 0;
1479 switch (we_p->funccode) {
1480 case ICARSAMODEXPO:
1481 if (!mex_p->b_key || !mex_p->n_modulus)
1482 rv = SEN_USER_ERROR;
1483 break;
1484 case ICARSACRT:
1485 if (!IS_EVEN(crt_p->inputdatalength)) {
1486 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1487 crt_p->inputdatalength);
1488 rv = SEN_USER_ERROR;
1489 break;
1490 }
1491 if (!crt_p->bp_key ||
1492 !crt_p->bq_key ||
1493 !crt_p->np_prime ||
1494 !crt_p->nq_prime ||
1495 !crt_p->u_mult_inv) {
1496 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1497 crt_p->bp_key, crt_p->bq_key,
1498 crt_p->np_prime, crt_p->nq_prime,
1499 crt_p->u_mult_inv);
1500 rv = SEN_USER_ERROR;
1501 }
1502 break;
1503 default:
1504 PRINTK("bad func = %d\n", we_p->funccode);
1505 rv = SEN_USER_ERROR;
1506 break;
1507 }
1508 if (rv != 0)
1509 return rv;
1510
1511 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
1512 return SEN_NOT_AVAIL;
1513
1514 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1515 sizeof(struct caller);
1516 if (copy_from_user(temp_buffer, mex_p->inputdata,
1517 mex_p->inputdatalength) != 0)
1518 return SEN_RELEASED;
1519
1520 function = PCI_FUNC_KEY_ENCRYPT;
1521 switch (we_p->devtype) {
1522 /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
1523 case PCICA:
1524 case CEX2A:
1525 function = PCI_FUNC_KEY_ENCRYPT;
1526 break;
1527 /**
1528 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
1529 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1530 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
1531 * mod-expo operation
1532 */
1533 case PCIXCC_MCL2:
1534 if (we_p->funccode == ICARSAMODEXPO)
1535 function = PCI_FUNC_KEY_ENCRYPT;
1536 else
1537 function = PCI_FUNC_KEY_DECRYPT;
1538 break;
1539 case PCIXCC_MCL3:
1540 case CEX2C:
1541 if (we_p->funccode == ICARSAMODEXPO)
1542 function = PCI_FUNC_KEY_ENCRYPT;
1543 else
1544 function = PCI_FUNC_KEY_DECRYPT;
1545 break;
1546 /**
1547 * PCICC does everything as a PKCS-1.2 format request
1548 */
1549 case PCICC:
1550	/* PCICC cannot handle input that is PKCS#1.1 padded */
1551 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1552 return SEN_NOT_AVAIL;
1553 }
1554 if (we_p->funccode == ICARSAMODEXPO) {
1555 if (is_PKCS12_padded(temp_buffer,
1556 mex_p->inputdatalength))
1557 function = PCI_FUNC_KEY_ENCRYPT;
1558 else
1559 function = PCI_FUNC_KEY_DECRYPT;
1560 } else
1561 /* all CRT forms are decrypts */
1562 function = PCI_FUNC_KEY_DECRYPT;
1563 break;
1564 }
1565 PDEBUG("function: %04x\n", function);
1566 rv = build_caller(we_p, function);
1567 PDEBUG("rv from build_caller = %d\n", rv);
1568 return rv;
1569}
1570
1571static inline int
1572z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1573 const char __user *buffer)
1574{
1575 int rv;
1576
1577 we_p->devindex = -1;
1578 if (funccode == ICARSAMODEXPO)
1579 we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1580 else
1581 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1582
1583 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1584 return -EFAULT;
1585
1586 we_p->audit[0] |= FP_COPYFROM;
1587 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1588 we_p->funccode = funccode;
1589 we_p->devtype = -1;
1590 we_p->audit[0] |= FP_BUFFREQ;
1591 rv = get_crypto_request_buffer(we_p);
1592 switch (rv) {
1593 case 0:
1594 we_p->audit[0] |= FP_BUFFGOT;
1595 break;
1596 case SEN_USER_ERROR:
1597 rv = -EINVAL;
1598 break;
1599 case SEN_QUEUE_FULL:
1600 rv = 0;
1601 break;
1602 case SEN_RELEASED:
1603 rv = -EFAULT;
1604 break;
1605 case REC_NO_RESPONSE:
1606 rv = -ENODEV;
1607 break;
1608 case SEN_NOT_AVAIL:
1609 case EGETBUFF:
1610 rv = -EGETBUFF;
1611 break;
1612 default:
1613 PRINTK("rv = %d\n", rv);
1614 rv = -EGETBUFF;
1615 break;
1616 }
1617 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1618 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
1619 return rv;
1620}
1621
1622static inline void
1623purge_work_element(struct work_element *we_p)
1624{
1625 struct list_head *lptr;
1626
1627 spin_lock_irq(&queuespinlock);
1628 list_for_each(lptr, &request_list) {
1629 if (lptr == &we_p->liste) {
1630 list_del_init(lptr);
1631 requestq_count--;
1632 break;
1633 }
1634 }
1635 list_for_each(lptr, &pending_list) {
1636 if (lptr == &we_p->liste) {
1637 list_del_init(lptr);
1638 pendingq_count--;
1639 break;
1640 }
1641 }
1642 spin_unlock_irq(&queuespinlock);
1643}
1644
1645/**
1646 * Build the request and send it.
1647 */
1648static inline int
1649z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1650 unsigned int cmd, unsigned long arg)
1651{
1652 struct work_element *we_p;
1653 int rv;
1654
1655 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1656 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1657 return rv;
1658 }
1659 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1660 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1661 if (!rv)
1662 if ((rv = z90crypt_send(we_p, (const char *)arg)))
1663 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
1664 if (!rv) {
1665 we_p->audit[0] |= FP_ASLEEP;
1666 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1667 we_p->audit[0] |= FP_AWAKE;
1668 rv = we_p->retcode;
1669 }
1670 if (!rv)
1671 rv = z90crypt_process_results(we_p, (char __user *)arg);
1672
1673 if ((we_p->status[0] & STAT_FAILED)) {
1674 switch (rv) {
1675 /**
1676 * EINVAL *after* receive is almost always a padding error or
1677 * length error issued by a coprocessor (not an accelerator).
1678 * We convert this return value to -EGETBUFF which should
1679 * trigger a fallback to software.
1680 */
1681 case -EINVAL:
1682 if ((we_p->devtype != PCICA) &&
1683 (we_p->devtype != CEX2A))
1684 rv = -EGETBUFF;
1685 break;
1686 case -ETIMEOUT:
1687 if (z90crypt.mask.st_count > 0)
1688 rv = -ERESTARTSYS; // retry with another
1689 else
1690 rv = -ENODEV; // no cards left
1691 /* fall through to clean up request queue */
1692 case -ERESTARTSYS:
1693 case -ERELEASED:
1694 switch (CHK_RDWRMASK(we_p->status[0])) {
1695 case STAT_WRITTEN:
1696 purge_work_element(we_p);
1697 break;
1698 case STAT_READPEND:
1699 case STAT_NOWORK:
1700 default:
1701 break;
1702 }
1703 break;
1704 default:
1705 we_p->status[0] ^= STAT_FAILED;
1706 break;
1707 }
1708 }
1709 free_page((long)we_p);
1710 return rv;
1711}
1712
1713/**
1714 * This function is a little long, but it's really just one large switch
1715 * statement.
1716 */
1717static long
1718z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1719{
1720 struct priv_data *private_data_p = filp->private_data;
1721 unsigned char *status;
1722 unsigned char *qdepth;
1723 unsigned int *reqcnt;
1724 struct ica_z90_status *pstat;
1725 int ret, i, loopLim, tempstat;
1726 static int deprecated_msg_count1 = 0;
1727 static int deprecated_msg_count2 = 0;
1728
1729 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1730 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1731 cmd,
1732 !_IOC_DIR(cmd) ? "NO"
1733 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1734 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1735 : "WR")),
1736 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1737
1738 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1739 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1740 return -ENOTTY;
1741 }
1742
1743 ret = 0;
1744 switch (cmd) {
1745 case ICARSAMODEXPO:
1746 case ICARSACRT:
1747 if (quiesce_z90crypt) {
1748 ret = -EQUIESCE;
1749 break;
1750 }
1751 ret = -ENODEV; // Default if no devices
1752 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1753 (z90crypt.hdware_info->hdware_mask.disabled_count +
1754 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1755 for (i = 0; i < loopLim; i++) {
1756 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1757 if (ret != -ERESTARTSYS)
1758 break;
1759 }
1760 if (ret == -ERESTARTSYS)
1761 ret = -ENODEV;
1762 break;
1763
1764 case Z90STAT_TOTALCOUNT:
1765 tempstat = get_status_totalcount();
1766 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1767 ret = -EFAULT;
1768 break;
1769
1770 case Z90STAT_PCICACOUNT:
1771 tempstat = get_status_PCICAcount();
1772 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1773 ret = -EFAULT;
1774 break;
1775
1776 case Z90STAT_PCICCCOUNT:
1777 tempstat = get_status_PCICCcount();
1778 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1779 ret = -EFAULT;
1780 break;
1781
1782 case Z90STAT_PCIXCCMCL2COUNT:
1783 tempstat = get_status_PCIXCCMCL2count();
1784 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1785 ret = -EFAULT;
1786 break;
1787
1788 case Z90STAT_PCIXCCMCL3COUNT:
1789 tempstat = get_status_PCIXCCMCL3count();
1790 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1791 ret = -EFAULT;
1792 break;
1793
1794 case Z90STAT_CEX2CCOUNT:
1795 tempstat = get_status_CEX2Ccount();
1796 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1797 ret = -EFAULT;
1798 break;
1799
1800 case Z90STAT_CEX2ACOUNT:
1801 tempstat = get_status_CEX2Acount();
1802 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1803 ret = -EFAULT;
1804 break;
1805
1806 case Z90STAT_REQUESTQ_COUNT:
1807 tempstat = get_status_requestq_count();
1808 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1809 ret = -EFAULT;
1810 break;
1811
1812 case Z90STAT_PENDINGQ_COUNT:
1813 tempstat = get_status_pendingq_count();
1814 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1815 ret = -EFAULT;
1816 break;
1817
1818 case Z90STAT_TOTALOPEN_COUNT:
1819 tempstat = get_status_totalopen_count();
1820 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1821 ret = -EFAULT;
1822 break;
1823
1824 case Z90STAT_DOMAIN_INDEX:
1825 tempstat = get_status_domain_index();
1826 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1827 ret = -EFAULT;
1828 break;
1829
1830 case Z90STAT_STATUS_MASK:
1831 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1832 if (!status) {
1833 PRINTK("kmalloc for status failed!\n");
1834 ret = -ENOMEM;
1835 break;
1836 }
1837 get_status_status_mask(status);
1838 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1839 != 0)
1840 ret = -EFAULT;
1841 kfree(status);
1842 break;
1843
1844 case Z90STAT_QDEPTH_MASK:
1845 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1846 if (!qdepth) {
1847 PRINTK("kmalloc for qdepth failed!\n");
1848 ret = -ENOMEM;
1849 break;
1850 }
1851 get_status_qdepth_mask(qdepth);
1852 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1853 ret = -EFAULT;
1854 kfree(qdepth);
1855 break;
1856
1857 case Z90STAT_PERDEV_REQCNT:
1858 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1859 if (!reqcnt) {
1860 PRINTK("kmalloc for reqcnt failed!\n");
1861 ret = -ENOMEM;
1862 break;
1863 }
1864 get_status_perdevice_reqcnt(reqcnt);
1865 if (copy_to_user((char __user *) arg, reqcnt,
1866 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1867 ret = -EFAULT;
1868 kfree(reqcnt);
1869 break;
1870
1871 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1872 case ICAZ90STATUS:
1873 if (deprecated_msg_count1 < 20) {
1874 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1875 deprecated_msg_count1++;
1876 if (deprecated_msg_count1 == 20)
1877 PRINTK("No longer issuing messages related to "
1878 "deprecated call to ICAZ90STATUS.\n");
1879 }
1880
1881 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1882 if (!pstat) {
1883 PRINTK("kmalloc for pstat failed!\n");
1884 ret = -ENOMEM;
1885 break;
1886 }
1887
1888 pstat->totalcount = get_status_totalcount();
1889 pstat->leedslitecount = get_status_PCICAcount();
1890 pstat->leeds2count = get_status_PCICCcount();
1891 pstat->requestqWaitCount = get_status_requestq_count();
1892 pstat->pendingqWaitCount = get_status_pendingq_count();
1893 pstat->totalOpenCount = get_status_totalopen_count();
1894 pstat->cryptoDomain = get_status_domain_index();
1895 get_status_status_mask(pstat->status);
1896 get_status_qdepth_mask(pstat->qdepth);
1897
1898 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1899 sizeof(struct ica_z90_status)) != 0)
1900 ret = -EFAULT;
1901 kfree(pstat);
1902 break;
1903
1904 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1905 case Z90STAT_PCIXCCCOUNT:
1906 if (deprecated_msg_count2 < 20) {
1907 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1908 deprecated_msg_count2++;
1909 if (deprecated_msg_count2 == 20)
1910 PRINTK("No longer issuing messages about depre"
1911 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1912 }
1913
1914 tempstat = get_status_PCIXCCcount();
1915		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1916 ret = -EFAULT;
1917 break;
1918
1919 case Z90QUIESCE:
1920 if (current->euid != 0) {
1921 PRINTK("QUIESCE fails: euid %d\n",
1922 current->euid);
1923 ret = -EACCES;
1924 } else {
1925 PRINTK("QUIESCE device from PID %d\n", PID());
1926 quiesce_z90crypt = 1;
1927 }
1928 break;
1929
1930 default:
1931 /* user passed an invalid IOCTL number */
1932 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
1933 ret = -ENOTTY;
1934 break;
1935 }
1936
1937 return ret;
1938}
1939
1940static inline int
1941sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1942{
1943 int hl, i;
1944
1945 hl = 0;
1946 for (i = 0; i < len; i++)
1947 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
1948 hl += sprintf(outaddr+hl, " ");
1949
1950 return hl;
1951}
1952
1953static inline int
1954sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1955{
1956 int hl, inl, c, cx;
1957
1958 hl = sprintf(outaddr, " ");
1959 inl = 0;
1960 for (c = 0; c < (len / 16); c++) {
1961 hl += sprintcl(outaddr+hl, addr+inl, 16);
1962 inl += 16;
1963 }
1964
1965 cx = len%16;
1966 if (cx) {
1967 hl += sprintcl(outaddr+hl, addr+inl, cx);
1968 inl += cx;
1969 }
1970
1971 hl += sprintf(outaddr+hl, "\n");
1972
1973 return hl;
1974}
1975
1976static inline int
1977sprinthx(unsigned char *title, unsigned char *outaddr,
1978 unsigned char *addr, unsigned int len)
1979{
1980 int hl, inl, r, rx;
1981
1982 hl = sprintf(outaddr, "\n%s\n", title);
1983 inl = 0;
1984 for (r = 0; r < (len / 64); r++) {
1985 hl += sprintrw(outaddr+hl, addr+inl, 64);
1986 inl += 64;
1987 }
1988 rx = len % 64;
1989 if (rx) {
1990 hl += sprintrw(outaddr+hl, addr+inl, rx);
1991 inl += rx;
1992 }
1993
1994 hl += sprintf(outaddr+hl, "\n");
1995
1996 return hl;
1997}
1998
1999static inline int
2000sprinthx4(unsigned char *title, unsigned char *outaddr,
2001 unsigned int *array, unsigned int len)
2002{
2003 int hl, r;
2004
2005 hl = sprintf(outaddr, "\n%s\n", title);
2006
2007 for (r = 0; r < len; r++) {
2008 if ((r % 8) == 0)
2009 hl += sprintf(outaddr+hl, " ");
2010 hl += sprintf(outaddr+hl, "%08X ", array[r]);
2011 if ((r % 8) == 7)
2012 hl += sprintf(outaddr+hl, "\n");
2013 }
2014
2015 hl += sprintf(outaddr+hl, "\n");
2016
2017 return hl;
2018}
2019
2020static int
2021z90crypt_status(char *resp_buff, char **start, off_t offset,
2022 int count, int *eof, void *data)
2023{
2024 unsigned char *workarea;
2025 int len;
2026
2027 /* resp_buff is a page. Use the right half for a work area */
2028 workarea = resp_buff+2000;
2029 len = 0;
2030 len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
2031 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
2032 len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
2033 get_status_domain_index());
2034 len += sprintf(resp_buff+len, "Total device count: %d\n",
2035 get_status_totalcount());
2036 len += sprintf(resp_buff+len, "PCICA count: %d\n",
2037 get_status_PCICAcount());
2038 len += sprintf(resp_buff+len, "PCICC count: %d\n",
2039 get_status_PCICCcount());
2040 len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
2041 get_status_PCIXCCMCL2count());
2042 len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
2043 get_status_PCIXCCMCL3count());
2044 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2045 get_status_CEX2Ccount());
2046 len += sprintf(resp_buff+len, "CEX2A count: %d\n",
2047 get_status_CEX2Acount());
2048 len += sprintf(resp_buff+len, "requestq count: %d\n",
2049 get_status_requestq_count());
2050 len += sprintf(resp_buff+len, "pendingq count: %d\n",
2051 get_status_pendingq_count());
2052 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2053 get_status_totalopen_count());
2054 len += sprinthx(
2055 "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
2056 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
2057 resp_buff+len,
2058 get_status_status_mask(workarea),
2059 Z90CRYPT_NUM_APS);
2060 len += sprinthx("Waiting work element counts",
2061 resp_buff+len,
2062 get_status_qdepth_mask(workarea),
2063 Z90CRYPT_NUM_APS);
2064 len += sprinthx4(
2065 "Per-device successfully completed request counts",
2066 resp_buff+len,
2067 get_status_perdevice_reqcnt((unsigned int *)workarea),
2068 Z90CRYPT_NUM_APS);
2069 *eof = 1;
2070 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
2071 return len;
2072}
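
/*
 * Illustrative sketch (not part of the driver): the text that
 * z90crypt_status() above writes to /proc/driver/z90crypt has roughly the
 * following shape.  Every version number, count and mask digit shown here
 * is a made-up placeholder value; the real output depends on the
 * installed cards.
 *
 *	z90crypt version: 1.3.3
 *	Cryptographic domain: 0
 *	Total device count: 2
 *	PCICA count: 1
 *	PCICC count: 0
 *	PCIXCC MCL2 count: 0
 *	PCIXCC MCL3 count: 0
 *	CEX2C count: 1
 *	CEX2A count: 0
 *	requestq count: 0
 *	pendingq count: 0
 *	Total open handles: 1
 *
 *	Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) 4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A
 *	  1500000000000000 0000000000000000 0000000000000000 0000000000000000
 *
 *	Waiting work element counts
 *	  0000000000000000 0000000000000000 0000000000000000 0000000000000000
 *
 *	Per-device successfully completed request counts
 *	  0000002A 00000013 00000000 00000000 00000000 00000000 00000000 00000000
 *	  ...
 */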
2073
2074static inline void
2075disable_card(int card_index)
2076{
2077 struct device *devp;
2078
2079 devp = LONG2DEVPTR(card_index);
2080 if (!devp || devp->user_disabled)
2081 return;
2082 devp->user_disabled = 1;
2083 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2084 if (devp->dev_type == -1)
2085 return;
2086 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2087}
2088
2089static inline void
2090enable_card(int card_index)
2091{
2092 struct device *devp;
2093
2094 devp = LONG2DEVPTR(card_index);
2095 if (!devp || !devp->user_disabled)
2096 return;
2097 devp->user_disabled = 0;
2098 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2099 if (devp->dev_type == -1)
2100 return;
2101 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2102}
2103
2104static int
2105z90crypt_status_write(struct file *file, const char __user *buffer,
2106 unsigned long count, void *data)
2107{
2108 int j, eol;
2109 unsigned char *lbuf, *ptr;
2110 unsigned int local_count;
2111
2112#define LBUFSIZE 1200
2113	if (count <= 0)
2114		return 0;
2115
2116	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2117	if (!lbuf) {
2118		PRINTK("kmalloc failed!\n");
2119		return 0;
2120	}
2121
2122 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2123
2124 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2125 kfree(lbuf);
2126 return -EFAULT;
2127 }
2128
2129 lbuf[local_count] = '\0';
2130
2131 ptr = strstr(lbuf, "Online devices");
2132 if (ptr == 0) {
2133 PRINTK("Unable to parse data (missing \"Online devices\")\n");
2134 kfree(lbuf);
2135 return count;
2136 }
2137
2138 ptr = strstr(ptr, "\n");
2139 if (ptr == 0) {
2140 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
2141 kfree(lbuf);
2142 return count;
2143 }
2144 ptr++;
2145
2146 if (strstr(ptr, "Waiting work element counts") == NULL) {
2147 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
2148 kfree(lbuf);
2149 return count;
2150 }
2151
2152 j = 0;
2153 eol = 0;
2154 while ((j < 64) && (*ptr != '\0')) {
2155 switch (*ptr) {
2156 case '\t':
2157 case ' ':
2158 break;
2159 case '\n':
2160 default:
2161 eol = 1;
2162 break;
2163 case '0': // no device
2164 case '1': // PCICA
2165 case '2': // PCICC
2166 case '3': // PCIXCC_MCL2
2167 case '4': // PCIXCC_MCL3
2168 case '5': // CEX2C
2169 case '6': // CEX2A
2170 j++;
2171 break;
2172 case 'd':
2173 case 'D':
2174 disable_card(j);
2175 j++;
2176 break;
2177 case 'e':
2178 case 'E':
2179 enable_card(j);
2180 j++;
2181 break;
2182 }
2183 if (eol)
2184 break;
2185 ptr++;
2186 }
2187
2188 kfree(lbuf);
2189 return count;
2190}
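
/*
 * Illustrative sketch (not part of the driver): the parser above only
 * needs the "Online devices" header line, the per-card characters, and
 * the "Waiting work element counts" header to be present in the text
 * written back, so a user-space tool can disable card 0 while leaving
 * card 1 alone with a write like the one below ('d'/'D' disables,
 * 'e'/'E' enables, a digit leaves the card as reported).  The proc path
 * and helper name are assumptions for illustration only.
 */
#if 0	/* user-space example, never compiled into the driver */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int disable_card0_example(void)
{
	static const char cmd[] =
		"Online devices\n"
		" d 1\n"
		"Waiting work element counts\n";
	int fd, rc;

	fd = open("/proc/driver/z90crypt", O_WRONLY);
	if (fd < 0)
		return -1;
	rc = write(fd, cmd, strlen(cmd));
	close(fd);
	return rc < 0 ? -1 : 0;
}
#endif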
2191
2192/**
2193 * Functions that run under a timer, with no process id
2194 *
2195 * The task functions:
2196 * z90crypt_reader_task
2197 * helper_send_work
2198 * helper_handle_work_element
2199 * helper_receive_rc
2200 * z90crypt_config_task
2201 * z90crypt_cleanup_task
2202 *
2203 * Helper functions:
2204 * z90crypt_schedule_reader_timer
2205 * z90crypt_schedule_reader_task
2206 * z90crypt_schedule_config_task
2207 * z90crypt_schedule_cleanup_task
2208 */
2209static inline int
2210receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2211 unsigned char *buff, unsigned char __user **dest_p_p)
2212{
2213 int dv, rv;
2214 struct device *dev_ptr;
2215 struct caller *caller_p;
2216 struct ica_rsa_modexpo *icaMsg_p;
2217 struct list_head *ptr, *tptr;
2218
2219 memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2220
2221 if (z90crypt.terminating)
2222 return REC_FATAL_ERROR;
2223
2224 caller_p = 0;
2225 dev_ptr = z90crypt.device_p[index];
2226 rv = 0;
2227 do {
2228 if (!dev_ptr || dev_ptr->disabled) {
2229 rv = REC_NO_WORK; // a disabled device can't return work
2230 break;
2231 }
2232 if (dev_ptr->dev_self_x != index) {
2233 PRINTKC("Corrupt dev ptr\n");
2234 z90crypt.terminating = 1;
2235 rv = REC_FATAL_ERROR;
2236 break;
2237 }
2238 if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2239 dv = DEV_REC_EXCEPTION;
2240 PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2241 dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2242 } else {
2243 PDEBUG("Dequeue called for device %d\n", index);
2244 dv = receive_from_AP(index, z90crypt.cdx,
2245 dev_ptr->dev_resp_l,
2246 dev_ptr->dev_resp_p, psmid);
2247 }
2248 switch (dv) {
2249 case DEV_REC_EXCEPTION:
2250 rv = REC_FATAL_ERROR;
2251 z90crypt.terminating = 1;
2252 PRINTKC("Exception in receive from device %d\n",
2253 index);
2254 break;
2255 case DEV_ONLINE:
2256 rv = 0;
2257 break;
2258 case DEV_EMPTY:
2259 rv = REC_EMPTY;
2260 break;
2261 case DEV_NO_WORK:
2262 rv = REC_NO_WORK;
2263 break;
2264 case DEV_BAD_MESSAGE:
2265 case DEV_GONE:
2266 case REC_HARDWAR_ERR:
2267 default:
2268 rv = REC_NO_RESPONSE;
2269 break;
2270 }
2271 if (rv)
2272 break;
2273 if (dev_ptr->dev_caller_count <= 0) {
2274 rv = REC_USER_GONE;
2275 break;
2276 }
2277
2278 list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2279 caller_p = list_entry(ptr, struct caller, caller_liste);
2280 if (!memcmp(caller_p->caller_id, psmid,
2281 sizeof(caller_p->caller_id))) {
2282 if (!list_empty(&caller_p->caller_liste)) {
2283 list_del_init(ptr);
2284 dev_ptr->dev_caller_count--;
2285 break;
2286 }
2287 }
2288 caller_p = 0;
2289 }
2290 if (!caller_p) {
2291 PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
2292 "%02X%02X%02X in device list\n",
2293 psmid[0], psmid[1], psmid[2], psmid[3],
2294 psmid[4], psmid[5], psmid[6], psmid[7]);
2295 rv = REC_USER_GONE;
2296 break;
2297 }
2298
2299 PDEBUG("caller_p after successful receive: %p\n", caller_p);
2300 rv = convert_response(dev_ptr->dev_resp_p,
2301 caller_p->caller_buf_p, buff_len_p, buff);
2302 switch (rv) {
2303 case REC_USE_PCICA:
2304 break;
2305 case REC_OPERAND_INV:
2306 case REC_OPERAND_SIZE:
2307 case REC_EVEN_MOD:
2308 case REC_INVALID_PAD:
2309 PDEBUG("device %d: 'user error' %d\n", index, rv);
2310 break;
2311 case WRONG_DEVICE_TYPE:
2312 case REC_HARDWAR_ERR:
2313 case REC_BAD_MESSAGE:
2314 PRINTKW("device %d: hardware error %d\n", index, rv);
2315 rv = REC_NO_RESPONSE;
2316 break;
2317 default:
2318 PDEBUG("device %d: rv = %d\n", index, rv);
2319 break;
2320 }
2321 } while (0);
2322
2323 switch (rv) {
2324 case 0:
2325 PDEBUG("Successful receive from device %d\n", index);
2326 icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2327 *dest_p_p = icaMsg_p->outputdata;
2328 if (*buff_len_p == 0)
2329 PRINTK("Zero *buff_len_p\n");
2330 break;
2331 case REC_NO_RESPONSE:
2332 PRINTKW("Removing device %d from availability\n", index);
2333 remove_device(dev_ptr);
2334 break;
2335 }
2336
2337 if (caller_p)
2338 unbuild_caller(dev_ptr, caller_p);
2339
2340 return rv;
2341}
2342
2343static inline void
2344helper_send_work(int index)
2345{
2346 struct work_element *rq_p;
2347 int rv;
2348
2349 if (list_empty(&request_list))
2350 return;
2351 requestq_count--;
2352 rq_p = list_entry(request_list.next, struct work_element, liste);
2353 list_del_init(&rq_p->liste);
2354 rq_p->audit[1] |= FP_REMREQUEST;
2355 if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2356 rq_p->devindex = SHRT2LONG(index);
2357 rv = send_to_crypto_device(rq_p);
2358 if (rv == 0) {
2359 rq_p->requestsent = jiffies;
2360 rq_p->audit[0] |= FP_SENT;
2361 list_add_tail(&rq_p->liste, &pending_list);
2362 ++pendingq_count;
2363 rq_p->audit[0] |= FP_PENDING;
2364 } else {
2365 switch (rv) {
2366 case REC_OPERAND_INV:
2367 case REC_OPERAND_SIZE:
2368 case REC_EVEN_MOD:
2369 case REC_INVALID_PAD:
2370 rq_p->retcode = -EINVAL;
2371 break;
2372 case SEN_NOT_AVAIL:
2373 case SEN_RETRY:
2374 case REC_NO_RESPONSE:
2375 default:
2376 if (z90crypt.mask.st_count > 1)
2377 rq_p->retcode =
2378 -ERESTARTSYS;
2379 else
2380 rq_p->retcode = -ENODEV;
2381 break;
2382 }
2383 rq_p->status[0] |= STAT_FAILED;
2384 rq_p->audit[1] |= FP_AWAKENING;
2385 atomic_set(&rq_p->alarmrung, 1);
2386 wake_up(&rq_p->waitq);
2387 }
2388 } else {
2389 if (z90crypt.mask.st_count > 1)
2390 rq_p->retcode = -ERESTARTSYS;
2391 else
2392 rq_p->retcode = -ENODEV;
2393 rq_p->status[0] |= STAT_FAILED;
2394 rq_p->audit[1] |= FP_AWAKENING;
2395 atomic_set(&rq_p->alarmrung, 1);
2396 wake_up(&rq_p->waitq);
2397 }
2398}
2399
2400static inline void
2401helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2402 int buff_len, unsigned char *buff,
2403 unsigned char __user *resp_addr)
2404{
2405 struct work_element *pq_p;
2406 struct list_head *lptr, *tptr;
2407
2408 pq_p = 0;
2409 list_for_each_safe(lptr, tptr, &pending_list) {
2410 pq_p = list_entry(lptr, struct work_element, liste);
2411 if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2412 list_del_init(lptr);
2413 pendingq_count--;
2414 pq_p->audit[1] |= FP_NOTPENDING;
2415 break;
2416 }
2417 pq_p = 0;
2418 }
2419
2420 if (!pq_p) {
2421 PRINTK("device %d has work but no caller exists on pending Q\n",
2422 SHRT2LONG(index));
2423 return;
2424 }
2425
2426 switch (rc) {
2427 case 0:
2428 pq_p->resp_buff_size = buff_len;
2429 pq_p->audit[1] |= FP_RESPSIZESET;
2430 if (buff_len) {
2431 pq_p->resp_addr = resp_addr;
2432 pq_p->audit[1] |= FP_RESPADDRCOPIED;
2433 memcpy(pq_p->resp_buff, buff, buff_len);
2434 pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2435 }
2436 break;
2437 case REC_OPERAND_INV:
2438 case REC_OPERAND_SIZE:
2439 case REC_EVEN_MOD:
2440 case REC_INVALID_PAD:
2441 PDEBUG("-EINVAL after application error %d\n", rc);
2442 pq_p->retcode = -EINVAL;
2443 pq_p->status[0] |= STAT_FAILED;
2444 break;
2445 case REC_USE_PCICA:
2446 pq_p->retcode = -ERESTARTSYS;
2447 pq_p->status[0] |= STAT_FAILED;
2448 break;
2449 case REC_NO_RESPONSE:
2450 default:
2451 if (z90crypt.mask.st_count > 1)
2452 pq_p->retcode = -ERESTARTSYS;
2453 else
2454 pq_p->retcode = -ENODEV;
2455 pq_p->status[0] |= STAT_FAILED;
2456 break;
2457 }
2458 if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2459 pq_p->audit[1] |= FP_AWAKENING;
2460 atomic_set(&pq_p->alarmrung, 1);
2461 wake_up(&pq_p->waitq);
2462 }
2463}
2464
2465/**
2466 * return TRUE if the work element should be removed from the queue
2467 */
2468static inline int
2469helper_receive_rc(int index, int *rc_p)
2470{
2471 switch (*rc_p) {
2472 case 0:
2473 case REC_OPERAND_INV:
2474 case REC_OPERAND_SIZE:
2475 case REC_EVEN_MOD:
2476 case REC_INVALID_PAD:
2477 case REC_USE_PCICA:
2478 break;
2479
2480 case REC_BUSY:
2481 case REC_NO_WORK:
2482 case REC_EMPTY:
2483 case REC_RETRY_DEV:
2484 case REC_FATAL_ERROR:
2485 return 0;
2486
2487 case REC_NO_RESPONSE:
2488 break;
2489
2490 default:
2491 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2492 *rc_p, SHRT2LONG(index));
2493 *rc_p = REC_NO_RESPONSE;
2494 break;
2495 }
2496 return 1;
2497}
2498
2499static inline void
2500z90crypt_schedule_reader_timer(void)
2501{
2502 if (timer_pending(&reader_timer))
2503 return;
2504 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2505 PRINTK("Timer pending while modifying reader timer\n");
2506}
2507
2508static void
2509z90crypt_reader_task(unsigned long ptr)
2510{
2511 int workavail, index, rc, buff_len;
2512 unsigned char psmid[8];
2513 unsigned char __user *resp_addr;
2514 static unsigned char buff[1024];
2515
2516 /**
2517 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2518 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
2519 * loop, there is no work remaining on the queues.
2520 */
2521 resp_addr = 0;
2522 workavail = 2;
2523 buff_len = 0;
2524 while (workavail) {
2525 workavail--;
2526 rc = 0;
2527 spin_lock_irq(&queuespinlock);
2528 memset(buff, 0x00, sizeof(buff));
2529
2530 /* Dequeue once from each device in round robin. */
2531 for (index = 0; index < z90crypt.mask.st_count; index++) {
2532 PDEBUG("About to receive.\n");
2533 rc = receive_from_crypto_device(SHRT2LONG(index),
2534 psmid,
2535 &buff_len,
2536 buff,
2537 &resp_addr);
2538 PDEBUG("Dequeued: rc = %d.\n", rc);
2539
2540 if (helper_receive_rc(index, &rc)) {
2541 if (rc != REC_NO_RESPONSE) {
2542 helper_send_work(index);
2543 workavail = 2;
2544 }
2545
2546 helper_handle_work_element(index, psmid, rc,
2547 buff_len, buff,
2548 resp_addr);
2549 }
2550
2551 if (rc == REC_FATAL_ERROR)
2552 PRINTKW("REC_FATAL_ERROR from device %d!\n",
2553 SHRT2LONG(index));
2554 }
2555 spin_unlock_irq(&queuespinlock);
2556 }
2557
2558 if (pendingq_count + requestq_count)
2559 z90crypt_schedule_reader_timer();
2560}
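
Editor's note: the loop above only exits after two consecutive passes over all devices in which nothing was dequeued. The following is a minimal, stand-alone user-space sketch of just that exit condition; fake_dequeue() and NDEV are hypothetical stand-ins for receive_from_crypto_device() and the device count, not driver code.

/* Build with any C compiler; illustration only. */
#include <stdio.h>

#define NDEV 4

static int work[NDEV] = { 2, 0, 1, 0 };	/* pending replies per device (made up) */

static int fake_dequeue(int dev)
{
	if (work[dev] > 0) {
		work[dev]--;
		return 1;		/* something was dequeued */
	}
	return 0;
}

int main(void)
{
	int workavail = 2;		/* two clean passes required before exit */
	int dev, passes = 0;

	while (workavail) {
		workavail--;
		passes++;
		for (dev = 0; dev < NDEV; dev++)
			if (fake_dequeue(dev))
				workavail = 2;	/* reset on any progress */
	}
	printf("drained after %d passes\n", passes);
	return 0;
}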
2561
2562static inline void
2563z90crypt_schedule_config_task(unsigned int expiration)
2564{
2565 if (timer_pending(&config_timer))
2566 return;
2567 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2568 PRINTK("Timer pending while modifying config timer\n");
2569}
2570
2571static void
2572z90crypt_config_task(unsigned long ptr)
2573{
2574 int rc;
2575
2576 PDEBUG("jiffies %ld\n", jiffies);
2577
2578 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2579 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2580 /* If return was fatal, don't bother reconfiguring */
2581 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2582 z90crypt_schedule_config_task(CONFIGTIME);
2583}
2584
2585static inline void
2586z90crypt_schedule_cleanup_task(void)
2587{
2588 if (timer_pending(&cleanup_timer))
2589 return;
2590 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2591 PRINTK("Timer pending while modifying cleanup timer\n");
2592}
2593
2594static inline void
2595helper_drain_queues(void)
2596{
2597 struct work_element *pq_p;
2598 struct list_head *lptr, *tptr;
2599
2600 list_for_each_safe(lptr, tptr, &pending_list) {
2601 pq_p = list_entry(lptr, struct work_element, liste);
2602 pq_p->retcode = -ENODEV;
2603 pq_p->status[0] |= STAT_FAILED;
2604 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2605 (struct caller *)pq_p->requestptr);
2606 list_del_init(lptr);
2607 pendingq_count--;
2608 pq_p->audit[1] |= FP_NOTPENDING;
2609 pq_p->audit[1] |= FP_AWAKENING;
2610 atomic_set(&pq_p->alarmrung, 1);
2611 wake_up(&pq_p->waitq);
2612 }
2613
2614 list_for_each_safe(lptr, tptr, &request_list) {
2615 pq_p = list_entry(lptr, struct work_element, liste);
2616 pq_p->retcode = -ENODEV;
2617 pq_p->status[0] |= STAT_FAILED;
2618 list_del_init(lptr);
2619 requestq_count--;
2620 pq_p->audit[1] |= FP_REMREQUEST;
2621 pq_p->audit[1] |= FP_AWAKENING;
2622 atomic_set(&pq_p->alarmrung, 1);
2623 wake_up(&pq_p->waitq);
2624 }
2625}
2626
2627static inline void
2628helper_timeout_requests(void)
2629{
2630 struct work_element *pq_p;
2631 struct list_head *lptr, *tptr;
2632 long timelimit;
2633
2634 timelimit = jiffies - (CLEANUPTIME * HZ);
2635 /* The list is in strict chronological order */
2636 list_for_each_safe(lptr, tptr, &pending_list) {
2637 pq_p = list_entry(lptr, struct work_element, liste);
2638 if (pq_p->requestsent >= timelimit)
2639 break;
2640 PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2641 ((struct caller *)pq_p->requestptr)->caller_id[0],
2642 ((struct caller *)pq_p->requestptr)->caller_id[1],
2643 ((struct caller *)pq_p->requestptr)->caller_id[2],
2644 ((struct caller *)pq_p->requestptr)->caller_id[3],
2645 ((struct caller *)pq_p->requestptr)->caller_id[4],
2646 ((struct caller *)pq_p->requestptr)->caller_id[5],
2647 ((struct caller *)pq_p->requestptr)->caller_id[6],
2648 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2649 pq_p->retcode = -ETIMEOUT;
2650 pq_p->status[0] |= STAT_FAILED;
2651 /* get this off any caller queue it may be on */
2652 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2653 (struct caller *) pq_p->requestptr);
2654 list_del_init(lptr);
2655 pendingq_count--;
2656 pq_p->audit[1] |= FP_TIMEDOUT;
2657 pq_p->audit[1] |= FP_NOTPENDING;
2658 pq_p->audit[1] |= FP_AWAKENING;
2659 atomic_set(&pq_p->alarmrung, 1);
2660 wake_up(&pq_p->waitq);
2661 }
2662
2663 /**
2664 * If pending count is zero, items left on the request queue may
2665 * never be processed.
2666 */
2667 if (pendingq_count <= 0) {
2668 list_for_each_safe(lptr, tptr, &request_list) {
2669 pq_p = list_entry(lptr, struct work_element, liste);
2670 if (pq_p->requestsent >= timelimit)
2671 break;
2672 PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2673 ((struct caller *)pq_p->requestptr)->caller_id[0],
2674 ((struct caller *)pq_p->requestptr)->caller_id[1],
2675 ((struct caller *)pq_p->requestptr)->caller_id[2],
2676 ((struct caller *)pq_p->requestptr)->caller_id[3],
2677 ((struct caller *)pq_p->requestptr)->caller_id[4],
2678 ((struct caller *)pq_p->requestptr)->caller_id[5],
2679 ((struct caller *)pq_p->requestptr)->caller_id[6],
2680 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2681 pq_p->retcode = -ETIMEOUT;
2682 pq_p->status[0] |= STAT_FAILED;
2683 list_del_init(lptr);
2684 requestq_count--;
2685 pq_p->audit[1] |= FP_TIMEDOUT;
2686 pq_p->audit[1] |= FP_REMREQUEST;
2687 pq_p->audit[1] |= FP_AWAKENING;
2688 atomic_set(&pq_p->alarmrung, 1);
2689 wake_up(&pq_p->waitq);
2690 }
2691 }
2692}
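
Editor's note: because both queues are kept in submission order, the purge above can stop at the first entry that is newer than the cutoff instead of scanning the whole list. A small stand-alone sketch of that early-exit scan, using plain integers in place of jiffies (all values made up):

#include <stdio.h>

int main(void)
{
	long sent[] = { 10, 20, 35, 90, 95 };	/* ascending submit times */
	long now = 100, age_limit = 50;
	long cutoff = now - age_limit;		/* everything older than this is purged */
	int i, purged = 0;

	for (i = 0; i < 5; i++) {
		if (sent[i] >= cutoff)
			break;			/* later entries are newer still */
		purged++;
	}
	printf("purged %d of 5 requests\n", purged);
	return 0;
}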
2693
2694static void
2695z90crypt_cleanup_task(unsigned long ptr)
2696{
2697 PDEBUG("jiffies %ld\n", jiffies);
2698 spin_lock_irq(&queuespinlock);
2699 if (z90crypt.mask.st_count <= 0) // no devices!
2700 helper_drain_queues();
2701 else
2702 helper_timeout_requests();
2703 spin_unlock_irq(&queuespinlock);
2704 z90crypt_schedule_cleanup_task();
2705}
2706
2707static void
2708z90crypt_schedule_reader_task(unsigned long ptr)
2709{
2710 tasklet_schedule(&reader_tasklet);
2711}
2712
2713/**
2714 * Lowlevel Functions:
2715 *
2716 * create_z90crypt: creates and initializes basic data structures
2717 * refresh_z90crypt: re-initializes basic data structures
2718 * find_crypto_devices: returns a count and mask of hardware status
2719 * create_crypto_device: builds the descriptor for a device
2720 * destroy_crypto_device: deallocates the descriptor for a device
2721 * destroy_z90crypt: drains all work, deallocates structs
2722 */
2723
2724/**
2725 * build the z90crypt root structure using the given domain index
2726 */
2727static int
2728create_z90crypt(int *cdx_p)
2729{
2730 struct hdware_block *hdware_blk_p;
2731
2732 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2733 z90crypt.domain_established = 0;
2734 z90crypt.len = sizeof(struct z90crypt);
2735 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2736 z90crypt.cdx = *cdx_p;
2737
2738 hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2739 if (!hdware_blk_p) {
2740 PDEBUG("kmalloc for hardware block failed\n");
2741 return ENOMEM;
2742 }
2743 z90crypt.hdware_info = hdware_blk_p;
2744
2745 return 0;
2746}
2747
2748static inline int
2749helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2750{
2751 enum hdstat hd_stat;
2752 int q_depth, dev_type;
2753 int indx, chkdom, numdomains;
2754
2755 q_depth = dev_type = numdomains = 0;
2756 for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2757 for (indx = 0; indx < z90crypt.max_count; indx++) {
2758 hd_stat = HD_NOT_THERE;
2759 numdomains = 0;
2760 for (chkdom = 0; chkdom <= 15; chkdom++) {
2761 hd_stat = query_online(indx, chkdom, MAX_RESET,
2762 &q_depth, &dev_type);
2763 if (hd_stat == HD_TSQ_EXCEPTION) {
2764 z90crypt.terminating = 1;
2765 PRINTKC("exception taken!\n");
2766 break;
2767 }
2768 if (hd_stat == HD_ONLINE) {
2769 cdx_array[numdomains++] = chkdom;
2770 if (*cdx_p == chkdom) {
2771 *correct_cdx_found = 1;
2772 break;
2773 }
2774 }
2775 }
2776 if ((*correct_cdx_found == 1) || (numdomains != 0))
2777 break;
2778 if (z90crypt.terminating)
2779 break;
2780 }
2781 return numdomains;
2782}
2783
2784static inline int
2785probe_crypto_domain(int *cdx_p)
2786{
2787 int cdx_array[16];
2788 char cdx_array_text[53], temp[5];
2789 int correct_cdx_found, numdomains;
2790
2791 correct_cdx_found = 0;
2792 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2793
2794 if (z90crypt.terminating)
2795 return TSQ_FATAL_ERROR;
2796
2797 if (correct_cdx_found)
2798 return 0;
2799
2800 if (numdomains == 0) {
2801 PRINTKW("Unable to find crypto domain: No devices found\n");
2802 return Z90C_NO_DEVICES;
2803 }
2804
2805 if (numdomains == 1) {
2806 if (*cdx_p == -1) {
2807 *cdx_p = cdx_array[0];
2808 return 0;
2809 }
2810 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2811 *cdx_p, cdx_array[0]);
2812 return Z90C_INCORRECT_DOMAIN;
2813 }
2814
2815 numdomains--;
2816 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2817 while (numdomains) {
2818 numdomains--;
2819 sprintf(temp, ", %d", cdx_array[numdomains]);
2820 strcat(cdx_array_text, temp);
2821 }
2822
2823 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2824 *cdx_p, cdx_array_text);
2825 return Z90C_AMBIGUOUS_DOMAIN;
2826}
2827
2828static int
2829refresh_z90crypt(int *cdx_p)
2830{
2831 int i, j, indx, rv;
2832 static struct status local_mask;
2833 struct device *devPtr;
2834 unsigned char oldStat, newStat;
2835 int return_unchanged;
2836
2837 if (z90crypt.len != sizeof(z90crypt))
2838 return ENOTINIT;
2839 if (z90crypt.terminating)
2840 return TSQ_FATAL_ERROR;
2841 rv = 0;
2842 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2843 !z90crypt.domain_established) {
2844 rv = probe_crypto_domain(cdx_p);
2845 if (z90crypt.terminating)
2846 return TSQ_FATAL_ERROR;
2847 if (rv == Z90C_NO_DEVICES)
2848 return 0; // try later
2849 if (rv)
2850 return rv;
2851 z90crypt.cdx = *cdx_p;
2852 z90crypt.domain_established = 1;
2853 }
2854 rv = find_crypto_devices(&local_mask);
2855 if (rv) {
2856 PRINTK("find crypto devices returned %d\n", rv);
2857 return rv;
2858 }
2859 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2860 sizeof(struct status))) {
2861 return_unchanged = 1;
2862 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2863 /**
2864 * Check for disabled cards. If any device is marked
2865 * disabled, destroy it.
2866 */
2867 for (j = 0;
2868 j < z90crypt.hdware_info->type_mask[i].st_count;
2869 j++) {
2870 indx = z90crypt.hdware_info->type_x_addr[i].
2871 device_index[j];
2872 devPtr = z90crypt.device_p[indx];
2873 if (devPtr && devPtr->disabled) {
2874 local_mask.st_mask[indx] = HD_NOT_THERE;
2875 return_unchanged = 0;
2876 }
2877 }
2878 }
2879 if (return_unchanged == 1)
2880 return 0;
2881 }
2882
2883 spin_lock_irq(&queuespinlock);
2884 for (i = 0; i < z90crypt.max_count; i++) {
2885 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2886 newStat = local_mask.st_mask[i];
2887 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2888 destroy_crypto_device(i);
2889 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2890 rv = create_crypto_device(i);
2891 if (rv >= REC_FATAL_ERROR)
2892 return rv;
2893 if (rv != 0) {
2894 local_mask.st_mask[i] = HD_NOT_THERE;
2895 local_mask.st_count--;
2896 }
2897 }
2898 }
2899 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2900 sizeof(local_mask.st_mask));
2901 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2902 z90crypt.hdware_info->hdware_mask.disabled_count =
2903 local_mask.disabled_count;
2904 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2905 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2906 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2907 &(z90crypt.hdware_info->type_x_addr[i]));
2908 spin_unlock_irq(&queuespinlock);
2909
2910 return rv;
2911}
2912
2913static int
2914find_crypto_devices(struct status *deviceMask)
2915{
2916 int i, q_depth, dev_type;
2917 enum hdstat hd_stat;
2918
2919 deviceMask->st_count = 0;
2920 deviceMask->disabled_count = 0;
2921 deviceMask->user_disabled_count = 0;
2922
2923 for (i = 0; i < z90crypt.max_count; i++) {
2924 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
2925 &dev_type);
2926 if (hd_stat == HD_TSQ_EXCEPTION) {
2927 z90crypt.terminating = 1;
2928 PRINTKC("Exception during probe for crypto devices\n");
2929 return TSQ_FATAL_ERROR;
2930 }
2931 deviceMask->st_mask[i] = hd_stat;
2932 if (hd_stat == HD_ONLINE) {
2933 PDEBUG("Got an online crypto!: %d\n", i);
2934 PDEBUG("Got a queue depth of %d\n", q_depth);
2935 PDEBUG("Got a device type of %d\n", dev_type);
2936 if (q_depth <= 0)
2937 return TSQ_FATAL_ERROR;
2938 deviceMask->st_count++;
2939 z90crypt.q_depth_array[i] = q_depth;
2940 z90crypt.dev_type_array[i] = dev_type;
2941 }
2942 }
2943
2944 return 0;
2945}
2946
2947static int
2948refresh_index_array(struct status *status_str, struct device_x *index_array)
2949{
2950 int i, count;
2951 enum devstat stat;
2952
2953 i = -1;
2954 count = 0;
2955 do {
2956 stat = status_str->st_mask[++i];
2957 if (stat == DEV_ONLINE)
2958 index_array->device_index[count++] = i;
2959 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
2960
2961 return count;
2962}
2963
2964static int
2965create_crypto_device(int index)
2966{
2967 int rv, devstat, total_size;
2968 struct device *dev_ptr;
2969 struct status *type_str_p;
2970 int deviceType;
2971
2972 dev_ptr = z90crypt.device_p[index];
2973 if (!dev_ptr) {
2974 total_size = sizeof(struct device) +
2975 z90crypt.q_depth_array[index] * sizeof(int);
2976
2977 dev_ptr = kzalloc(total_size, GFP_ATOMIC);
2978 if (!dev_ptr) {
2979 PRINTK("kmalloc device %d failed\n", index);
2980 return ENOMEM;
2981 }
2982 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
2983 if (!dev_ptr->dev_resp_p) {
2984 kfree(dev_ptr);
2985 PRINTK("kmalloc device %d rec buffer failed\n", index);
2986 return ENOMEM;
2987 }
2988 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
2989 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
2990 }
2991
2992 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
2993 if (devstat == DEV_RSQ_EXCEPTION) {
2994 PRINTK("exception during reset device %d\n", index);
2995 kfree(dev_ptr->dev_resp_p);
2996 kfree(dev_ptr);
2997 return RSQ_FATAL_ERROR;
2998 }
2999 if (devstat == DEV_ONLINE) {
3000 dev_ptr->dev_self_x = index;
3001 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3002 if (dev_ptr->dev_type == NILDEV) {
3003 rv = probe_device_type(dev_ptr);
3004 if (rv) {
3005 PRINTK("rv = %d from probe_device_type %d\n",
3006 rv, index);
3007 kfree(dev_ptr->dev_resp_p);
3008 kfree(dev_ptr);
3009 return rv;
3010 }
3011 }
3012 if (dev_ptr->dev_type == PCIXCC_UNK) {
3013 rv = probe_PCIXCC_type(dev_ptr);
3014 if (rv) {
3015 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
3016 rv, index);
3017 kfree(dev_ptr->dev_resp_p);
3018 kfree(dev_ptr);
3019 return rv;
3020 }
3021 }
3022 deviceType = dev_ptr->dev_type;
3023 z90crypt.dev_type_array[index] = deviceType;
3024 if (deviceType == PCICA)
3025 z90crypt.hdware_info->device_type_array[index] = 1;
3026 else if (deviceType == PCICC)
3027 z90crypt.hdware_info->device_type_array[index] = 2;
3028 else if (deviceType == PCIXCC_MCL2)
3029 z90crypt.hdware_info->device_type_array[index] = 3;
3030 else if (deviceType == PCIXCC_MCL3)
3031 z90crypt.hdware_info->device_type_array[index] = 4;
3032 else if (deviceType == CEX2C)
3033 z90crypt.hdware_info->device_type_array[index] = 5;
3034 else if (deviceType == CEX2A)
3035 z90crypt.hdware_info->device_type_array[index] = 6;
3036 else // No idea how this would happen.
3037 z90crypt.hdware_info->device_type_array[index] = -1;
3038 }
3039
3040 /**
3041 * 'q_depth' returned by the hardware is one less than
3042 * the actual depth
3043 */
3044 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3045 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3046 dev_ptr->dev_stat = devstat;
3047 dev_ptr->disabled = 0;
3048 z90crypt.device_p[index] = dev_ptr;
3049
3050 if (devstat == DEV_ONLINE) {
3051 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3052 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3053 z90crypt.mask.st_count++;
3054 }
3055 deviceType = dev_ptr->dev_type;
3056 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3057 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3058 type_str_p->st_mask[index] = DEV_ONLINE;
3059 type_str_p->st_count++;
3060 }
3061 }
3062
3063 return 0;
3064}
3065
3066static int
3067destroy_crypto_device(int index)
3068{
3069 struct device *dev_ptr;
3070 int t, disabledFlag;
3071
3072 dev_ptr = z90crypt.device_p[index];
3073
3074 /* remember device type; get rid of device struct */
3075 if (dev_ptr) {
3076 disabledFlag = dev_ptr->disabled;
3077 t = dev_ptr->dev_type;
3078 kfree(dev_ptr->dev_resp_p);
3079 kfree(dev_ptr);
3080 } else {
3081 disabledFlag = 0;
3082 t = -1;
3083 }
3084 z90crypt.device_p[index] = 0;
3085
3086 /* if the type is valid, remove the device from the type_mask */
3087 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3088 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3089 z90crypt.hdware_info->type_mask[t].st_count--;
3090 if (disabledFlag == 1)
3091 z90crypt.hdware_info->type_mask[t].disabled_count--;
3092 }
3093 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3094 z90crypt.mask.st_mask[index] = DEV_GONE;
3095 z90crypt.mask.st_count--;
3096 }
3097 z90crypt.hdware_info->device_type_array[index] = 0;
3098
3099 return 0;
3100}
3101
3102static void
3103destroy_z90crypt(void)
3104{
3105 int i;
3106
3107 for (i = 0; i < z90crypt.max_count; i++)
3108 if (z90crypt.device_p[i])
3109 destroy_crypto_device(i);
3110 kfree(z90crypt.hdware_info);
3111 memset((void *)&z90crypt, 0, sizeof(z90crypt));
3112}
3113
3114static unsigned char static_testmsg[384] = {
31150x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
31160x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
31170x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
31180x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
31190x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31200x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31210x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
31220x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31230xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31240x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31250x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31260x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31270x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
31280x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
31290x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
31300x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
31310x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
31320x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
31330x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
31340x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
31350x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
31360xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
31370x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
31380x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
3139};
3140
3141static int
3142probe_device_type(struct device *devPtr)
3143{
3144 int rv, dv, i, index, length;
3145 unsigned char psmid[8];
3146 static unsigned char loc_testmsg[sizeof(static_testmsg)];
3147
3148 index = devPtr->dev_self_x;
3149 rv = 0;
3150 do {
3151 memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3152 length = sizeof(static_testmsg) - 24;
3153 /* the -24 allows for the header */
3154 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3155 if (dv) {
3156 PDEBUG("dv returned by send during probe: %d\n", dv);
3157 if (dv == DEV_SEN_EXCEPTION) {
3158 rv = SEN_FATAL_ERROR;
3159 PRINTKC("exception in send to AP %d\n", index);
3160 break;
3161 }
3162 PDEBUG("return value from send_to_AP: %d\n", rv);
3163 switch (dv) {
3164 case DEV_GONE:
3165 PDEBUG("dev %d not available\n", index);
3166 rv = SEN_NOT_AVAIL;
3167 break;
3168 case DEV_ONLINE:
3169 rv = 0;
3170 break;
3171 case DEV_EMPTY:
3172 rv = SEN_NOT_AVAIL;
3173 break;
3174 case DEV_NO_WORK:
3175 rv = SEN_FATAL_ERROR;
3176 break;
3177 case DEV_BAD_MESSAGE:
3178 rv = SEN_USER_ERROR;
3179 break;
3180 case DEV_QUEUE_FULL:
3181 rv = SEN_QUEUE_FULL;
3182 break;
3183 default:
3184 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3185 rv = SEN_NOT_AVAIL;
3186 break;
3187 }
3188 }
3189
3190 if (rv)
3191 break;
3192
3193 for (i = 0; i < 6; i++) {
3194 mdelay(300);
3195 dv = receive_from_AP(index, z90crypt.cdx,
3196 devPtr->dev_resp_l,
3197 devPtr->dev_resp_p, psmid);
3198 PDEBUG("dv returned by DQ = %d\n", dv);
3199 if (dv == DEV_REC_EXCEPTION) {
3200 rv = REC_FATAL_ERROR;
3201 PRINTKC("exception in dequeue %d\n",
3202 index);
3203 break;
3204 }
3205 switch (dv) {
3206 case DEV_ONLINE:
3207 rv = 0;
3208 break;
3209 case DEV_EMPTY:
3210 rv = REC_EMPTY;
3211 break;
3212 case DEV_NO_WORK:
3213 rv = REC_NO_WORK;
3214 break;
3215 case DEV_BAD_MESSAGE:
3216 case DEV_GONE:
3217 default:
3218 rv = REC_NO_RESPONSE;
3219 break;
3220 }
3221 if ((rv != 0) && (rv != REC_NO_WORK))
3222 break;
3223 if (rv == 0)
3224 break;
3225 }
3226 if (rv)
3227 break;
3228 rv = (devPtr->dev_resp_p[0] == 0x00) &&
3229 (devPtr->dev_resp_p[1] == 0x86);
3230 if (rv)
3231 devPtr->dev_type = PCICC;
3232 else
3233 devPtr->dev_type = PCICA;
3234 rv = 0;
3235 } while (0);
3236 /* In a general error case, the card is not marked online */
3237 return rv;
3238}
3239
3240static unsigned char MCL3_testmsg[] = {
32410x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
32420x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32430x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32440x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32450x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
32460x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
32470x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
32480x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
32490x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32500x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32510x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32520x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32530x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32540x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32550x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32560x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32570x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32580x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32590x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32600x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32610x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
32620x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
32630x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
32640xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
32650x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
32660x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
32670x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
32680x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
32690x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
32700xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
32710xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
32720x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
32730x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
32740xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
32750x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
3276};
3277
3278static int
3279probe_PCIXCC_type(struct device *devPtr)
3280{
3281 int rv, dv, i, index, length;
3282 unsigned char psmid[8];
3283 static unsigned char loc_testmsg[548];
3284 struct CPRBX *cprbx_p;
3285
3286 index = devPtr->dev_self_x;
3287 rv = 0;
3288 do {
3289 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3290 length = sizeof(MCL3_testmsg) - 0x0C;
3291 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3292 if (dv) {
3293 PDEBUG("dv returned = %d\n", dv);
3294 if (dv == DEV_SEN_EXCEPTION) {
3295 rv = SEN_FATAL_ERROR;
3296 PRINTKC("exception in send to AP %d\n", index);
3297 break;
3298 }
3299 PDEBUG("return value from send_to_AP: %d\n", rv);
3300 switch (dv) {
3301 case DEV_GONE:
3302 PDEBUG("dev %d not available\n", index);
3303 rv = SEN_NOT_AVAIL;
3304 break;
3305 case DEV_ONLINE:
3306 rv = 0;
3307 break;
3308 case DEV_EMPTY:
3309 rv = SEN_NOT_AVAIL;
3310 break;
3311 case DEV_NO_WORK:
3312 rv = SEN_FATAL_ERROR;
3313 break;
3314 case DEV_BAD_MESSAGE:
3315 rv = SEN_USER_ERROR;
3316 break;
3317 case DEV_QUEUE_FULL:
3318 rv = SEN_QUEUE_FULL;
3319 break;
3320 default:
3321 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3322 rv = SEN_NOT_AVAIL;
3323 break;
3324 }
3325 }
3326
3327 if (rv)
3328 break;
3329
3330 for (i = 0; i < 6; i++) {
3331 mdelay(300);
3332 dv = receive_from_AP(index, z90crypt.cdx,
3333 devPtr->dev_resp_l,
3334 devPtr->dev_resp_p, psmid);
3335 PDEBUG("dv returned by DQ = %d\n", dv);
3336 if (dv == DEV_REC_EXCEPTION) {
3337 rv = REC_FATAL_ERROR;
3338 PRINTKC("exception in dequeue %d\n",
3339 index);
3340 break;
3341 }
3342 switch (dv) {
3343 case DEV_ONLINE:
3344 rv = 0;
3345 break;
3346 case DEV_EMPTY:
3347 rv = REC_EMPTY;
3348 break;
3349 case DEV_NO_WORK:
3350 rv = REC_NO_WORK;
3351 break;
3352 case DEV_BAD_MESSAGE:
3353 case DEV_GONE:
3354 default:
3355 rv = REC_NO_RESPONSE;
3356 break;
3357 }
3358 if ((rv != 0) && (rv != REC_NO_WORK))
3359 break;
3360 if (rv == 0)
3361 break;
3362 }
3363 if (rv)
3364 break;
3365 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3366 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3367 devPtr->dev_type = PCIXCC_MCL2;
3368 PDEBUG("device %d is MCL2\n", index);
3369 } else {
3370 devPtr->dev_type = PCIXCC_MCL3;
3371 PDEBUG("device %d is MCL3\n", index);
3372 }
3373 } while (0);
3374 /* In a general error case, the card is not marked online */
3375 return rv;
3376}
3377
3378module_init(z90crypt_init_module);
3379module_exit(z90crypt_cleanup_module);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
new file mode 100644
index 000000000000..1edc10a7a6f2
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -0,0 +1,1091 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_api.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 * Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
13 * Ralph Wuerthner <rwuerthn@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/miscdevice.h>
34#include <linux/fs.h>
35#include <linux/proc_fs.h>
36#include <linux/compat.h>
37#include <asm/atomic.h>
38#include <asm/uaccess.h>
39
40#include "zcrypt_api.h"
41
42/**
43 * Module description.
44 */
45MODULE_AUTHOR("IBM Corporation");
46MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
47 "Copyright 2001, 2006 IBM Corporation");
48MODULE_LICENSE("GPL");
49
50static DEFINE_SPINLOCK(zcrypt_device_lock);
51static LIST_HEAD(zcrypt_device_list);
52static int zcrypt_device_count = 0;
53static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
54
55/**
56 * Device attributes common for all crypto devices.
57 */
58static ssize_t zcrypt_type_show(struct device *dev,
59 struct device_attribute *attr, char *buf)
60{
61 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
62 return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
63}
64
65static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
66
67static ssize_t zcrypt_online_show(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
71 return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
72}
73
74static ssize_t zcrypt_online_store(struct device *dev,
75 struct device_attribute *attr,
76 const char *buf, size_t count)
77{
78 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
79 int online;
80
81 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
82 return -EINVAL;
83 zdev->online = online;
84 if (!online)
85 ap_flush_queue(zdev->ap_dev);
86 return count;
87}
88
89static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
90
91static struct attribute * zcrypt_device_attrs[] = {
92 &dev_attr_type.attr,
93 &dev_attr_online.attr,
94 NULL,
95};
96
97static struct attribute_group zcrypt_device_attr_group = {
98 .attrs = zcrypt_device_attrs,
99};
100
101/**
102 * Move the device towards the head of the device list.
 103 * Must be called while holding the zcrypt device list lock.
104 * Note: cards with speed_rating of 0 are kept at the end of the list.
105 */
106static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
107{
108 struct zcrypt_device *tmp;
109 struct list_head *l;
110
111 if (zdev->speed_rating == 0)
112 return;
113 for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
114 tmp = list_entry(l, struct zcrypt_device, list);
115 if ((tmp->request_count + 1) * tmp->speed_rating <=
116 (zdev->request_count + 1) * zdev->speed_rating &&
117 tmp->speed_rating != 0)
118 break;
119 }
120 if (l == zdev->list.prev)
121 return;
122 /* Move zdev behind l */
123 list_del(&zdev->list);
124 list_add(&zdev->list, l);
125}
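
Editor's note: the walk above keeps the list ordered by the product (request_count + 1) * speed_rating, so the device with the smallest product sits at the head and is picked first by the request routines further down; zero-rated cards stay at the tail. A stand-alone sketch of that weight, with entirely made-up card names and numbers:

#include <stdio.h>

struct card { const char *name; int request_count; int speed_rating; };

int main(void)
{
	struct card cards[] = {
		{ "card A", 0, 300 },
		{ "card B", 2, 100 },
		{ "card C", 5, 100 },
	};
	int i;

	/* Lower weight = closer to the head of the device list. */
	for (i = 0; i < 3; i++)
		printf("%s: weight %d\n", cards[i].name,
		       (cards[i].request_count + 1) * cards[i].speed_rating);
	return 0;
}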
126
127/**
128 * Move the device towards the tail of the device list.
 129 * Must be called while holding the zcrypt device list lock.
130 * Note: cards with speed_rating of 0 are kept at the end of the list.
131 */
132static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
133{
134 struct zcrypt_device *tmp;
135 struct list_head *l;
136
137 if (zdev->speed_rating == 0)
138 return;
139 for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
140 tmp = list_entry(l, struct zcrypt_device, list);
141 if ((tmp->request_count + 1) * tmp->speed_rating >
142 (zdev->request_count + 1) * zdev->speed_rating ||
143 tmp->speed_rating == 0)
144 break;
145 }
146 if (l == zdev->list.next)
147 return;
148 /* Move zdev before l */
149 list_del(&zdev->list);
150 list_add_tail(&zdev->list, l);
151}
152
153static void zcrypt_device_release(struct kref *kref)
154{
155 struct zcrypt_device *zdev =
156 container_of(kref, struct zcrypt_device, refcount);
157 zcrypt_device_free(zdev);
158}
159
160void zcrypt_device_get(struct zcrypt_device *zdev)
161{
162 kref_get(&zdev->refcount);
163}
164EXPORT_SYMBOL(zcrypt_device_get);
165
166int zcrypt_device_put(struct zcrypt_device *zdev)
167{
168 return kref_put(&zdev->refcount, zcrypt_device_release);
169}
170EXPORT_SYMBOL(zcrypt_device_put);
171
172struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
173{
174 struct zcrypt_device *zdev;
175
176 zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
177 if (!zdev)
178 return NULL;
179 zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
180 if (!zdev->reply.message)
181 goto out_free;
182 zdev->reply.length = max_response_size;
183 spin_lock_init(&zdev->lock);
184 INIT_LIST_HEAD(&zdev->list);
185 return zdev;
186
187out_free:
188 kfree(zdev);
189 return NULL;
190}
191EXPORT_SYMBOL(zcrypt_device_alloc);
192
193void zcrypt_device_free(struct zcrypt_device *zdev)
194{
195 kfree(zdev->reply.message);
196 kfree(zdev);
197}
198EXPORT_SYMBOL(zcrypt_device_free);
199
200/**
201 * Register a crypto device.
202 */
203int zcrypt_device_register(struct zcrypt_device *zdev)
204{
205 int rc;
206
207 rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
208 &zcrypt_device_attr_group);
209 if (rc)
210 goto out;
211 get_device(&zdev->ap_dev->device);
212 kref_init(&zdev->refcount);
213 spin_lock_bh(&zcrypt_device_lock);
214 zdev->online = 1; /* New devices are online by default. */
215 list_add_tail(&zdev->list, &zcrypt_device_list);
216 __zcrypt_increase_preference(zdev);
217 zcrypt_device_count++;
218 spin_unlock_bh(&zcrypt_device_lock);
219out:
220 return rc;
221}
222EXPORT_SYMBOL(zcrypt_device_register);
223
224/**
225 * Unregister a crypto device.
226 */
227void zcrypt_device_unregister(struct zcrypt_device *zdev)
228{
229 spin_lock_bh(&zcrypt_device_lock);
230 zcrypt_device_count--;
231 list_del_init(&zdev->list);
232 spin_unlock_bh(&zcrypt_device_lock);
233 sysfs_remove_group(&zdev->ap_dev->device.kobj,
234 &zcrypt_device_attr_group);
235 put_device(&zdev->ap_dev->device);
236 zcrypt_device_put(zdev);
237}
238EXPORT_SYMBOL(zcrypt_device_unregister);
239
240/**
 241 * zcrypt_read is not supported beyond zcrypt 1.3.1
242 */
243static ssize_t zcrypt_read(struct file *filp, char __user *buf,
244 size_t count, loff_t *f_pos)
245{
246 return -EPERM;
247}
248
249/**
 250 * Write is not allowed
251 */
252static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
253 size_t count, loff_t *f_pos)
254{
255 return -EPERM;
256}
257
258/**
259 * Device open/close functions to count number of users.
260 */
261static int zcrypt_open(struct inode *inode, struct file *filp)
262{
263 atomic_inc(&zcrypt_open_count);
264 return 0;
265}
266
267static int zcrypt_release(struct inode *inode, struct file *filp)
268{
269 atomic_dec(&zcrypt_open_count);
270 return 0;
271}
272
273/**
274 * zcrypt ioctls.
275 */
276static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
277{
278 struct zcrypt_device *zdev;
279 int rc;
280
281 if (mex->outputdatalength < mex->inputdatalength)
282 return -EINVAL;
283 /**
284 * As long as outputdatalength is big enough, we can set the
285 * outputdatalength equal to the inputdatalength, since that is the
286 * number of bytes we will copy in any case
287 */
288 mex->outputdatalength = mex->inputdatalength;
289
290 spin_lock_bh(&zcrypt_device_lock);
291 list_for_each_entry(zdev, &zcrypt_device_list, list) {
292 if (!zdev->online ||
293 !zdev->ops->rsa_modexpo ||
294 zdev->min_mod_size > mex->inputdatalength ||
295 zdev->max_mod_size < mex->inputdatalength)
296 continue;
297 zcrypt_device_get(zdev);
298 get_device(&zdev->ap_dev->device);
299 zdev->request_count++;
300 __zcrypt_decrease_preference(zdev);
301 spin_unlock_bh(&zcrypt_device_lock);
302 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
303 rc = zdev->ops->rsa_modexpo(zdev, mex);
304 module_put(zdev->ap_dev->drv->driver.owner);
305 }
306 else
307 rc = -EAGAIN;
308 spin_lock_bh(&zcrypt_device_lock);
309 zdev->request_count--;
310 __zcrypt_increase_preference(zdev);
311 put_device(&zdev->ap_dev->device);
312 zcrypt_device_put(zdev);
313 spin_unlock_bh(&zcrypt_device_lock);
314 return rc;
315 }
316 spin_unlock_bh(&zcrypt_device_lock);
317 return -ENODEV;
318}
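
Editor's note: for reference, this is roughly how a user-space caller reaches zcrypt_rsa_modexpo() through the "z90crypt" misc device registered further down. It is a sketch only: the header name and device path are assumptions, the operands are left zeroed, and a real caller must supply right-aligned key material and check lengths.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>		/* struct ica_rsa_modexpo, ICARSAMODEXPO; header name assumed */

int main(void)
{
	char in[128], out[128], bkey[128], nmod[128];
	struct ica_rsa_modexpo mex;
	int fd;

	memset(in, 0, sizeof(in));
	memset(bkey, 0, sizeof(bkey));
	memset(nmod, 0, sizeof(nmod));
	/* a real caller fills in message, exponent and modulus here */

	fd = open("/dev/z90crypt", O_RDWR);	/* device node name assumed */
	if (fd < 0) {
		perror("open /dev/z90crypt");
		return 1;
	}
	memset(&mex, 0, sizeof(mex));
	mex.inputdata = in;
	mex.inputdatalength = sizeof(in);
	mex.outputdata = out;
	mex.outputdatalength = sizeof(out);
	mex.b_key = bkey;
	mex.n_modulus = nmod;

	/* -EAGAIN is retried inside zcrypt_unlocked_ioctl(), not here */
	if (ioctl(fd, ICARSAMODEXPO, &mex) < 0)
		perror("ICARSAMODEXPO");
	else
		printf("outputdatalength = %u\n", mex.outputdatalength);
	close(fd);
	return 0;
}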
319
320static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
321{
322 struct zcrypt_device *zdev;
323 unsigned long long z1, z2, z3;
324 int rc, copied;
325
326 if (crt->outputdatalength < crt->inputdatalength ||
327 (crt->inputdatalength & 1))
328 return -EINVAL;
329 /**
330 * As long as outputdatalength is big enough, we can set the
331 * outputdatalength equal to the inputdatalength, since that is the
332 * number of bytes we will copy in any case
333 */
334 crt->outputdatalength = crt->inputdatalength;
335
336 copied = 0;
337 restart:
338 spin_lock_bh(&zcrypt_device_lock);
339 list_for_each_entry(zdev, &zcrypt_device_list, list) {
340 if (!zdev->online ||
341 !zdev->ops->rsa_modexpo_crt ||
342 zdev->min_mod_size > crt->inputdatalength ||
343 zdev->max_mod_size < crt->inputdatalength)
344 continue;
345 if (zdev->short_crt && crt->inputdatalength > 240) {
346 /**
347 * Check inputdata for leading zeros for cards
348 * that can't handle np_prime, bp_key, or
349 * u_mult_inv > 128 bytes.
350 */
351 if (copied == 0) {
352 int len;
353 spin_unlock_bh(&zcrypt_device_lock);
354 /* len is max 256 / 2 - 120 = 8 */
355 len = crt->inputdatalength / 2 - 120;
356 z1 = z2 = z3 = 0;
357 if (copy_from_user(&z1, crt->np_prime, len) ||
358 copy_from_user(&z2, crt->bp_key, len) ||
359 copy_from_user(&z3, crt->u_mult_inv, len))
360 return -EFAULT;
361 copied = 1;
362 /**
363 * We have to restart device lookup -
364 * the device list may have changed by now.
365 */
366 goto restart;
367 }
368 if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
369 /* The device can't handle this request. */
370 continue;
371 }
372 zcrypt_device_get(zdev);
373 get_device(&zdev->ap_dev->device);
374 zdev->request_count++;
375 __zcrypt_decrease_preference(zdev);
376 spin_unlock_bh(&zcrypt_device_lock);
377 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
378 rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
379 module_put(zdev->ap_dev->drv->driver.owner);
380 }
381 else
382 rc = -EAGAIN;
383 spin_lock_bh(&zcrypt_device_lock);
384 zdev->request_count--;
385 __zcrypt_increase_preference(zdev);
386 put_device(&zdev->ap_dev->device);
387 zcrypt_device_put(zdev);
388 spin_unlock_bh(&zcrypt_device_lock);
389 return rc;
390 }
391 spin_unlock_bh(&zcrypt_device_lock);
392 return -ENODEV;
393}
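
Editor's note: the leading-zero test above decides whether a card limited to 128-byte np_prime, bp_key and u_mult_inv operands can still take a long request: for an inputdatalength of 256, the first 256/2 - 120 = 8 bytes of each operand must be zero. A tiny stand-alone illustration of that arithmetic; the buffer size and contents are hypothetical.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char np_prime[136];		/* hypothetical, left-padded operand */
	unsigned char zeros[8] = { 0 };
	int inputdatalength = 256;
	int len = inputdatalength / 2 - 120;	/* = 8 leading bytes to check */

	memset(np_prime, 0, sizeof(np_prime));
	np_prime[8] = 0x01;			/* significant bytes start here */

	if (memcmp(np_prime, zeros, len) == 0)
		printf("operand fits a short-CRT card (first %d bytes are zero)\n", len);
	else
		printf("operand needs a card without the 128-byte limit\n");
	return 0;
}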
394
395static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
396{
397 struct zcrypt_device *zdev;
398 int rc;
399
400 spin_lock_bh(&zcrypt_device_lock);
401 list_for_each_entry(zdev, &zcrypt_device_list, list) {
402 if (!zdev->online || !zdev->ops->send_cprb ||
403 (xcRB->user_defined != AUTOSELECT &&
404 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
405 )
406 continue;
407 zcrypt_device_get(zdev);
408 get_device(&zdev->ap_dev->device);
409 zdev->request_count++;
410 __zcrypt_decrease_preference(zdev);
411 spin_unlock_bh(&zcrypt_device_lock);
412 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
413 rc = zdev->ops->send_cprb(zdev, xcRB);
414 module_put(zdev->ap_dev->drv->driver.owner);
415 }
416 else
417 rc = -EAGAIN;
418 spin_lock_bh(&zcrypt_device_lock);
419 zdev->request_count--;
420 __zcrypt_increase_preference(zdev);
421 put_device(&zdev->ap_dev->device);
422 zcrypt_device_put(zdev);
423 spin_unlock_bh(&zcrypt_device_lock);
424 return rc;
425 }
426 spin_unlock_bh(&zcrypt_device_lock);
427 return -ENODEV;
428}
429
430static void zcrypt_status_mask(char status[AP_DEVICES])
431{
432 struct zcrypt_device *zdev;
433
434 memset(status, 0, sizeof(char) * AP_DEVICES);
435 spin_lock_bh(&zcrypt_device_lock);
436 list_for_each_entry(zdev, &zcrypt_device_list, list)
437 status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
438 zdev->online ? zdev->user_space_type : 0x0d;
439 spin_unlock_bh(&zcrypt_device_lock);
440}
441
442static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
443{
444 struct zcrypt_device *zdev;
445
446 memset(qdepth, 0, sizeof(char) * AP_DEVICES);
447 spin_lock_bh(&zcrypt_device_lock);
448 list_for_each_entry(zdev, &zcrypt_device_list, list) {
449 spin_lock(&zdev->ap_dev->lock);
450 qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
451 zdev->ap_dev->pendingq_count +
452 zdev->ap_dev->requestq_count;
453 spin_unlock(&zdev->ap_dev->lock);
454 }
455 spin_unlock_bh(&zcrypt_device_lock);
456}
457
458static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
459{
460 struct zcrypt_device *zdev;
461
462 memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
463 spin_lock_bh(&zcrypt_device_lock);
464 list_for_each_entry(zdev, &zcrypt_device_list, list) {
465 spin_lock(&zdev->ap_dev->lock);
466 reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
467 zdev->ap_dev->total_request_count;
468 spin_unlock(&zdev->ap_dev->lock);
469 }
470 spin_unlock_bh(&zcrypt_device_lock);
471}
472
473static int zcrypt_pendingq_count(void)
474{
475 struct zcrypt_device *zdev;
476 int pendingq_count = 0;
477
478 spin_lock_bh(&zcrypt_device_lock);
479 list_for_each_entry(zdev, &zcrypt_device_list, list) {
480 spin_lock(&zdev->ap_dev->lock);
481 pendingq_count += zdev->ap_dev->pendingq_count;
482 spin_unlock(&zdev->ap_dev->lock);
483 }
484 spin_unlock_bh(&zcrypt_device_lock);
485 return pendingq_count;
486}
487
488static int zcrypt_requestq_count(void)
489{
490 struct zcrypt_device *zdev;
491 int requestq_count = 0;
492
493 spin_lock_bh(&zcrypt_device_lock);
494 list_for_each_entry(zdev, &zcrypt_device_list, list) {
495 spin_lock(&zdev->ap_dev->lock);
496 requestq_count += zdev->ap_dev->requestq_count;
497 spin_unlock(&zdev->ap_dev->lock);
498 }
499 spin_unlock_bh(&zcrypt_device_lock);
500 return requestq_count;
501}
502
503static int zcrypt_count_type(int type)
504{
505 struct zcrypt_device *zdev;
506 int device_count = 0;
507
508 spin_lock_bh(&zcrypt_device_lock);
509 list_for_each_entry(zdev, &zcrypt_device_list, list)
510 if (zdev->user_space_type == type)
511 device_count++;
512 spin_unlock_bh(&zcrypt_device_lock);
513 return device_count;
514}
515
516/**
517 * Old, deprecated combi status call.
518 */
519static long zcrypt_ica_status(struct file *filp, unsigned long arg)
520{
521 struct ica_z90_status *pstat;
522 int ret;
523
524 pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
525 if (!pstat)
526 return -ENOMEM;
527 pstat->totalcount = zcrypt_device_count;
528 pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
529 pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
530 pstat->requestqWaitCount = zcrypt_requestq_count();
531 pstat->pendingqWaitCount = zcrypt_pendingq_count();
532 pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
533 pstat->cryptoDomain = ap_domain_index;
534 zcrypt_status_mask(pstat->status);
535 zcrypt_qdepth_mask(pstat->qdepth);
536 ret = 0;
537 if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
538 ret = -EFAULT;
539 kfree(pstat);
540 return ret;
541}
542
543static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
544 unsigned long arg)
545{
546 int rc;
547
548 switch (cmd) {
549 case ICARSAMODEXPO: {
550 struct ica_rsa_modexpo __user *umex = (void __user *) arg;
551 struct ica_rsa_modexpo mex;
552 if (copy_from_user(&mex, umex, sizeof(mex)))
553 return -EFAULT;
554 do {
555 rc = zcrypt_rsa_modexpo(&mex);
556 } while (rc == -EAGAIN);
557 if (rc)
558 return rc;
559 return put_user(mex.outputdatalength, &umex->outputdatalength);
560 }
561 case ICARSACRT: {
562 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
563 struct ica_rsa_modexpo_crt crt;
564 if (copy_from_user(&crt, ucrt, sizeof(crt)))
565 return -EFAULT;
566 do {
567 rc = zcrypt_rsa_crt(&crt);
568 } while (rc == -EAGAIN);
569 if (rc)
570 return rc;
571 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
572 }
573 case ZSECSENDCPRB: {
574 struct ica_xcRB __user *uxcRB = (void __user *) arg;
575 struct ica_xcRB xcRB;
576 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
577 return -EFAULT;
578 do {
579 rc = zcrypt_send_cprb(&xcRB);
580 } while (rc == -EAGAIN);
581 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
582 return -EFAULT;
583 return rc;
584 }
585 case Z90STAT_STATUS_MASK: {
586 char status[AP_DEVICES];
587 zcrypt_status_mask(status);
588 if (copy_to_user((char __user *) arg, status,
589 sizeof(char) * AP_DEVICES))
590 return -EFAULT;
591 return 0;
592 }
593 case Z90STAT_QDEPTH_MASK: {
594 char qdepth[AP_DEVICES];
595 zcrypt_qdepth_mask(qdepth);
596 if (copy_to_user((char __user *) arg, qdepth,
597 sizeof(char) * AP_DEVICES))
598 return -EFAULT;
599 return 0;
600 }
601 case Z90STAT_PERDEV_REQCNT: {
602 int reqcnt[AP_DEVICES];
603 zcrypt_perdev_reqcnt(reqcnt);
604 if (copy_to_user((int __user *) arg, reqcnt,
605 sizeof(int) * AP_DEVICES))
606 return -EFAULT;
607 return 0;
608 }
609 case Z90STAT_REQUESTQ_COUNT:
610 return put_user(zcrypt_requestq_count(), (int __user *) arg);
611 case Z90STAT_PENDINGQ_COUNT:
612 return put_user(zcrypt_pendingq_count(), (int __user *) arg);
613 case Z90STAT_TOTALOPEN_COUNT:
614 return put_user(atomic_read(&zcrypt_open_count),
615 (int __user *) arg);
616 case Z90STAT_DOMAIN_INDEX:
617 return put_user(ap_domain_index, (int __user *) arg);
618 /**
619 * Deprecated ioctls. Don't add another device count ioctl,
 620 * you can count them yourself in user space with the
621 * output of the Z90STAT_STATUS_MASK ioctl.
622 */
623 case ICAZ90STATUS:
624 return zcrypt_ica_status(filp, arg);
625 case Z90STAT_TOTALCOUNT:
626 return put_user(zcrypt_device_count, (int __user *) arg);
627 case Z90STAT_PCICACOUNT:
628 return put_user(zcrypt_count_type(ZCRYPT_PCICA),
629 (int __user *) arg);
630 case Z90STAT_PCICCCOUNT:
631 return put_user(zcrypt_count_type(ZCRYPT_PCICC),
632 (int __user *) arg);
633 case Z90STAT_PCIXCCMCL2COUNT:
634 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
635 (int __user *) arg);
636 case Z90STAT_PCIXCCMCL3COUNT:
637 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
638 (int __user *) arg);
639 case Z90STAT_PCIXCCCOUNT:
640 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
641 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
642 (int __user *) arg);
643 case Z90STAT_CEX2CCOUNT:
644 return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
645 (int __user *) arg);
646 case Z90STAT_CEX2ACOUNT:
647 return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
648 (int __user *) arg);
649 default:
650 /* unknown ioctl number */
651 return -ENOIOCTLCMD;
652 }
653}
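
Editor's note: a complementary user-space sketch for the status ioctls handled above. Z90STAT_STATUS_MASK fills one byte per AP slot: 0 for no device, the card's user_space_type when online, or 0x0d when offline. The array size of 64 is an assumption consistent with the 64-slot parser in zcrypt_status_write() below; header and device names are assumed as before.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>		/* Z90STAT_STATUS_MASK; header name assumed */

#define NR_AP_DEVICES 64	/* assumed value of AP_DEVICES */

int main(void)
{
	char status[NR_AP_DEVICES];
	int fd, i, online = 0;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0) {
		perror("open /dev/z90crypt");
		return 1;
	}
	if (ioctl(fd, Z90STAT_STATUS_MASK, status) < 0) {
		perror("Z90STAT_STATUS_MASK");
		close(fd);
		return 1;
	}
	for (i = 0; i < NR_AP_DEVICES; i++)
		if (status[i] != 0 && status[i] != 0x0d)	/* 0x0d marks an offline card */
			online++;
	printf("%d online crypto device(s)\n", online);
	close(fd);
	return 0;
}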
654
655#ifdef CONFIG_COMPAT
656/**
657 * ioctl32 conversion routines
658 */
659struct compat_ica_rsa_modexpo {
660 compat_uptr_t inputdata;
661 unsigned int inputdatalength;
662 compat_uptr_t outputdata;
663 unsigned int outputdatalength;
664 compat_uptr_t b_key;
665 compat_uptr_t n_modulus;
666};
667
668static long trans_modexpo32(struct file *filp, unsigned int cmd,
669 unsigned long arg)
670{
671 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
672 struct compat_ica_rsa_modexpo mex32;
673 struct ica_rsa_modexpo mex64;
674 long rc;
675
676 if (copy_from_user(&mex32, umex32, sizeof(mex32)))
677 return -EFAULT;
678 mex64.inputdata = compat_ptr(mex32.inputdata);
679 mex64.inputdatalength = mex32.inputdatalength;
680 mex64.outputdata = compat_ptr(mex32.outputdata);
681 mex64.outputdatalength = mex32.outputdatalength;
682 mex64.b_key = compat_ptr(mex32.b_key);
683 mex64.n_modulus = compat_ptr(mex32.n_modulus);
684 do {
685 rc = zcrypt_rsa_modexpo(&mex64);
686 } while (rc == -EAGAIN);
687 if (!rc)
688 rc = put_user(mex64.outputdatalength,
689 &umex32->outputdatalength);
690 return rc;
691}
692
693struct compat_ica_rsa_modexpo_crt {
694 compat_uptr_t inputdata;
695 unsigned int inputdatalength;
696 compat_uptr_t outputdata;
697 unsigned int outputdatalength;
698 compat_uptr_t bp_key;
699 compat_uptr_t bq_key;
700 compat_uptr_t np_prime;
701 compat_uptr_t nq_prime;
702 compat_uptr_t u_mult_inv;
703};
704
705static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
706 unsigned long arg)
707{
708 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
709 struct compat_ica_rsa_modexpo_crt crt32;
710 struct ica_rsa_modexpo_crt crt64;
711 long rc;
712
713 if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
714 return -EFAULT;
715 crt64.inputdata = compat_ptr(crt32.inputdata);
716 crt64.inputdatalength = crt32.inputdatalength;
717 crt64.outputdata= compat_ptr(crt32.outputdata);
718 crt64.outputdatalength = crt32.outputdatalength;
719 crt64.bp_key = compat_ptr(crt32.bp_key);
720 crt64.bq_key = compat_ptr(crt32.bq_key);
721 crt64.np_prime = compat_ptr(crt32.np_prime);
722 crt64.nq_prime = compat_ptr(crt32.nq_prime);
723 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
724 do {
725 rc = zcrypt_rsa_crt(&crt64);
726 } while (rc == -EAGAIN);
727 if (!rc)
728 rc = put_user(crt64.outputdatalength,
729 &ucrt32->outputdatalength);
730 return rc;
731}
732
733struct compat_ica_xcRB {
734 unsigned short agent_ID;
735 unsigned int user_defined;
736 unsigned short request_ID;
737 unsigned int request_control_blk_length;
738 unsigned char padding1[16 - sizeof (compat_uptr_t)];
739 compat_uptr_t request_control_blk_addr;
740 unsigned int request_data_length;
741 char padding2[16 - sizeof (compat_uptr_t)];
742 compat_uptr_t request_data_address;
743 unsigned int reply_control_blk_length;
744 char padding3[16 - sizeof (compat_uptr_t)];
745 compat_uptr_t reply_control_blk_addr;
746 unsigned int reply_data_length;
747 char padding4[16 - sizeof (compat_uptr_t)];
748 compat_uptr_t reply_data_addr;
749 unsigned short priority_window;
750 unsigned int status;
751} __attribute__((packed));
752
753static long trans_xcRB32(struct file *filp, unsigned int cmd,
754 unsigned long arg)
755{
756 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
757 struct compat_ica_xcRB xcRB32;
758 struct ica_xcRB xcRB64;
759 long rc;
760
761 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
762 return -EFAULT;
763 xcRB64.agent_ID = xcRB32.agent_ID;
764 xcRB64.user_defined = xcRB32.user_defined;
765 xcRB64.request_ID = xcRB32.request_ID;
766 xcRB64.request_control_blk_length =
767 xcRB32.request_control_blk_length;
768 xcRB64.request_control_blk_addr =
769 compat_ptr(xcRB32.request_control_blk_addr);
770 xcRB64.request_data_length =
771 xcRB32.request_data_length;
772 xcRB64.request_data_address =
773 compat_ptr(xcRB32.request_data_address);
774 xcRB64.reply_control_blk_length =
775 xcRB32.reply_control_blk_length;
776 xcRB64.reply_control_blk_addr =
777 compat_ptr(xcRB32.reply_control_blk_addr);
778 xcRB64.reply_data_length = xcRB32.reply_data_length;
779 xcRB64.reply_data_addr =
780 compat_ptr(xcRB32.reply_data_addr);
781 xcRB64.priority_window = xcRB32.priority_window;
782 xcRB64.status = xcRB32.status;
783 do {
784 rc = zcrypt_send_cprb(&xcRB64);
785 } while (rc == -EAGAIN);
786 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
787 xcRB32.reply_data_length = xcRB64.reply_data_length;
788 xcRB32.status = xcRB64.status;
789 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
790 return -EFAULT;
791 return rc;
792}
793
794long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
795 unsigned long arg)
796{
797 if (cmd == ICARSAMODEXPO)
798 return trans_modexpo32(filp, cmd, arg);
799 if (cmd == ICARSACRT)
800 return trans_modexpo_crt32(filp, cmd, arg);
801 if (cmd == ZSECSENDCPRB)
802 return trans_xcRB32(filp, cmd, arg);
803 return zcrypt_unlocked_ioctl(filp, cmd, arg);
804}
805#endif
806
807/**
808 * Misc device file operations.
809 */
810static struct file_operations zcrypt_fops = {
811 .owner = THIS_MODULE,
812 .read = zcrypt_read,
813 .write = zcrypt_write,
814 .unlocked_ioctl = zcrypt_unlocked_ioctl,
815#ifdef CONFIG_COMPAT
816 .compat_ioctl = zcrypt_compat_ioctl,
817#endif
818 .open = zcrypt_open,
819 .release = zcrypt_release
820};
821
822/**
823 * Misc device.
824 */
825static struct miscdevice zcrypt_misc_device = {
826 .minor = MISC_DYNAMIC_MINOR,
827 .name = "z90crypt",
828 .fops = &zcrypt_fops,
829};
830
831/**
832 * Deprecated /proc entry support.
833 */
834static struct proc_dir_entry *zcrypt_entry;
835
836static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
837 unsigned int len)
838{
839 int hl, i;
840
841 hl = 0;
842 for (i = 0; i < len; i++)
843 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
844 hl += sprintf(outaddr+hl, " ");
845 return hl;
846}
847
848static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
849 unsigned int len)
850{
851 int hl, inl, c, cx;
852
853 hl = sprintf(outaddr, " ");
854 inl = 0;
855 for (c = 0; c < (len / 16); c++) {
856 hl += sprintcl(outaddr+hl, addr+inl, 16);
857 inl += 16;
858 }
859 cx = len%16;
860 if (cx) {
861 hl += sprintcl(outaddr+hl, addr+inl, cx);
862 inl += cx;
863 }
864 hl += sprintf(outaddr+hl, "\n");
865 return hl;
866}
867
868static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
869 unsigned char *addr, unsigned int len)
870{
871 int hl, inl, r, rx;
872
873 hl = sprintf(outaddr, "\n%s\n", title);
874 inl = 0;
875 for (r = 0; r < (len / 64); r++) {
876 hl += sprintrw(outaddr+hl, addr+inl, 64);
877 inl += 64;
878 }
879 rx = len % 64;
880 if (rx) {
881 hl += sprintrw(outaddr+hl, addr+inl, rx);
882 inl += rx;
883 }
884 hl += sprintf(outaddr+hl, "\n");
885 return hl;
886}
887
888static inline int sprinthx4(unsigned char *title, unsigned char *outaddr,
889 unsigned int *array, unsigned int len)
890{
891 int hl, r;
892
893 hl = sprintf(outaddr, "\n%s\n", title);
894 for (r = 0; r < len; r++) {
895 if ((r % 8) == 0)
896 hl += sprintf(outaddr+hl, " ");
897 hl += sprintf(outaddr+hl, "%08X ", array[r]);
898 if ((r % 8) == 7)
899 hl += sprintf(outaddr+hl, "\n");
900 }
901 hl += sprintf(outaddr+hl, "\n");
902 return hl;
903}
904
905static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
906 int count, int *eof, void *data)
907{
908 unsigned char *workarea;
909 int len;
910
911 len = 0;
912
913 /* resp_buff is a page. Use the right half for a work area */
914 workarea = resp_buff + 2000;
915 len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n",
916 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
917 len += sprintf(resp_buff + len, "Cryptographic domain: %d\n",
918 ap_domain_index);
919 len += sprintf(resp_buff + len, "Total device count: %d\n",
920 zcrypt_device_count);
921 len += sprintf(resp_buff + len, "PCICA count: %d\n",
922 zcrypt_count_type(ZCRYPT_PCICA));
923 len += sprintf(resp_buff + len, "PCICC count: %d\n",
924 zcrypt_count_type(ZCRYPT_PCICC));
925 len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n",
926 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
927 len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n",
928 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
929 len += sprintf(resp_buff + len, "CEX2C count: %d\n",
930 zcrypt_count_type(ZCRYPT_CEX2C));
931 len += sprintf(resp_buff + len, "CEX2A count: %d\n",
932 zcrypt_count_type(ZCRYPT_CEX2A));
933 len += sprintf(resp_buff + len, "requestq count: %d\n",
934 zcrypt_requestq_count());
935 len += sprintf(resp_buff + len, "pendingq count: %d\n",
936 zcrypt_pendingq_count());
937 len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
938 atomic_read(&zcrypt_open_count));
939 zcrypt_status_mask(workarea);
940 len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
941 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
942 resp_buff+len, workarea, AP_DEVICES);
943 zcrypt_qdepth_mask(workarea);
944 len += sprinthx("Waiting work element counts",
945 resp_buff+len, workarea, AP_DEVICES);
946 zcrypt_perdev_reqcnt((unsigned int *) workarea);
947 len += sprinthx4("Per-device successfully completed request counts",
948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
949 *eof = 1;
950 memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int));
951 return len;
952}
953
954static void zcrypt_disable_card(int index)
955{
956 struct zcrypt_device *zdev;
957
958 spin_lock_bh(&zcrypt_device_lock);
959 list_for_each_entry(zdev, &zcrypt_device_list, list)
960 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
961 zdev->online = 0;
962 ap_flush_queue(zdev->ap_dev);
963 break;
964 }
965 spin_unlock_bh(&zcrypt_device_lock);
966}
967
968static void zcrypt_enable_card(int index)
969{
970 struct zcrypt_device *zdev;
971
972 spin_lock_bh(&zcrypt_device_lock);
973 list_for_each_entry(zdev, &zcrypt_device_list, list)
974 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
975 zdev->online = 1;
976 break;
977 }
978 spin_unlock_bh(&zcrypt_device_lock);
979}
980
981static int zcrypt_status_write(struct file *file, const char __user *buffer,
982 unsigned long count, void *data)
983{
984 unsigned char *lbuf, *ptr;
985 unsigned long local_count;
986 int j;
987
988 if (count <= 0)
989 return 0;
990
991#define LBUFSIZE 1200UL
992 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
993 if (!lbuf) {
994 PRINTK("kmalloc failed!\n");
995 return 0;
996 }
997
998 local_count = min(LBUFSIZE - 1, count);
999 if (copy_from_user(lbuf, buffer, local_count) != 0) {
1000 kfree(lbuf);
1001 return -EFAULT;
1002 }
1003 lbuf[local_count] = '\0';
1004
1005 ptr = strstr(lbuf, "Online devices");
1006 if (!ptr) {
1007 PRINTK("Unable to parse data (missing \"Online devices\")\n");
1008 goto out;
1009 }
1010 ptr = strstr(ptr, "\n");
1011 if (!ptr) {
1012 PRINTK("Unable to parse data (missing newline "
1013 "after \"Online devices\")\n");
1014 goto out;
1015 }
1016 ptr++;
1017
1018 if (strstr(ptr, "Waiting work element counts") == NULL) {
1019 PRINTK("Unable to parse data (missing "
1020 "\"Waiting work element counts\")\n");
1021 goto out;
1022 }
1023
1024 for (j = 0; j < 64 && *ptr; ptr++) {
1025 /**
1026 * '0' for no device, '1' for PCICA, '2' for PCICC,
1027 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1028 * '5' for CEX2C and '6' for CEX2A
1029 */
1030 if (*ptr >= '0' && *ptr <= '6')
1031 j++;
1032 else if (*ptr == 'd' || *ptr == 'D')
1033 zcrypt_disable_card(j++);
1034 else if (*ptr == 'e' || *ptr == 'E')
1035 zcrypt_enable_card(j++);
1036 else if (*ptr != ' ' && *ptr != '\t')
1037 break;
1038 }
1039out:
1040 kfree(lbuf);
1041 return count;
1042}
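Given the parser above, user space toggles a card by reading /proc/driver/z90crypt, replacing the digit at the card's position on the line after "Online devices" with 'd' or 'e', and writing the buffer back. A hedged sketch of that round trip; the buffer size and the assumption that the whole proc text fits in it are illustrative only:

#include <stdio.h>
#include <string.h>

/* Set card <index> offline ('d') or online ('e') via the deprecated
 * proc interface (sketch only, minimal error handling). */
static int toggle_card(int index, char action)
{
	char buf[4096];
	size_t len;
	char *p;
	FILE *f;
	int j;

	f = fopen("/proc/driver/z90crypt", "r+");
	if (!f)
		return -1;
	len = fread(buf, 1, sizeof(buf) - 1, f);
	buf[len] = '\0';

	p = strstr(buf, "Online devices");	/* same anchor the parser uses */
	if (p)
		p = strchr(p, '\n');
	if (!p) {
		fclose(f);
		return -1;
	}
	for (p++, j = 0; *p; p++) {
		if ((*p >= '0' && *p <= '6') ||
		    *p == 'd' || *p == 'D' || *p == 'e' || *p == 'E') {
			if (j++ == index) {
				*p = action;
				break;
			}
		} else if (*p != ' ' && *p != '\t')
			break;
	}
	rewind(f);
	fwrite(buf, 1, len, f);		/* the parser re-reads the whole map */
	fclose(f);
	return 0;
}

int main(void)
{
	return toggle_card(0, 'd');	/* take card 0 offline (illustration) */
}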
1043
1044/**
1045 * The module initialization code.
1046 */
1047int __init zcrypt_api_init(void)
1048{
1049 int rc;
1050
1051 /* Register the request sprayer. */
1052 rc = misc_register(&zcrypt_misc_device);
1053 if (rc < 0) {
1054 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
1055 zcrypt_misc_device.minor, rc);
1056 goto out;
1057 }
1058
1059 /* Set up the proc file system */
1060 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
1061 if (!zcrypt_entry) {
1062 PRINTK("Couldn't create z90crypt proc entry\n");
1063 rc = -ENOMEM;
1064 goto out_misc;
1065 }
1066 zcrypt_entry->nlink = 1;
1067 zcrypt_entry->data = NULL;
1068 zcrypt_entry->read_proc = zcrypt_status_read;
1069 zcrypt_entry->write_proc = zcrypt_status_write;
1070
1071 return 0;
1072
1073out_misc:
1074 misc_deregister(&zcrypt_misc_device);
1075out:
1076 return rc;
1077}
1078
1079/**
1080 * The module termination code.
1081 */
1082void zcrypt_api_exit(void)
1083{
1084 remove_proc_entry("driver/z90crypt", NULL);
1085 misc_deregister(&zcrypt_misc_device);
1086}
1087
1088#ifndef CONFIG_ZCRYPT_MONOLITHIC
1089module_init(zcrypt_api_init);
1090module_exit(zcrypt_api_exit);
1091#endif
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
new file mode 100644
index 000000000000..de4877ee618f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -0,0 +1,141 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_api.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 * Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
13 * Ralph Wuerthner <rwuerthn@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#ifndef _ZCRYPT_API_H_
31#define _ZCRYPT_API_H_
32
33/**
34 * Macro definitions
35 *
36 * PDEBUG debugs in the form "zcrypt: function_name -> message"
37 *
38 * PRINTK is like PDEBUG, except that it is always enabled
39 * PRINTKN is like PRINTK, except that it does not include the function name
40 * PRINTKW is like PRINTK, except that it uses KERN_WARNING
41 * PRINTKC is like PRINTK, except that it uses KERN_CRIT
42 */
43#define DEV_NAME "zcrypt"
44
45#define PRINTK(fmt, args...) \
46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
47#define PRINTKN(fmt, args...) \
48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
49#define PRINTKW(fmt, args...) \
50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
51#define PRINTKC(fmt, args...) \
52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
53
54#ifdef ZCRYPT_DEBUG
55#define PDEBUG(fmt, args...) \
56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
57#else
58#define PDEBUG(fmt, args...) do {} while (0)
59#endif
60
61#include "ap_bus.h"
62#include <asm/zcrypt.h>
63
64/* deprecated status calls */
65#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
66#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
67
68/**
69 * This structure is deprecated and the corresponding ioctl() has been
70 * replaced with individual ioctl()s for each piece of data!
71 */
72struct ica_z90_status {
73 int totalcount;
74 int leedslitecount; // PCICA
75 int leeds2count; // PCICC
76 // int PCIXCCCount; is not in struct for backward compatibility
77 int requestqWaitCount;
78 int pendingqWaitCount;
79 int totalOpenCount;
80 int cryptoDomain;
81 // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
82 // 5=CEX2C
83 unsigned char status[64];
84 // qdepth: # work elements waiting for each device
85 unsigned char qdepth[64];
86};
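For reference, the deprecated ioctl above was consumed from user space roughly as sketched below. The device node name matches the misc device registered in zcrypt_api.c; the ioctl number and structure layout are assumed to be visible to the program through an exported copy of this header (an assumption, since the kernel-internal header is not installed as-is):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* ICAZ90STATUS and struct ica_z90_status are assumed to be provided by
 * an exported header matching the definitions above. */

int main(void)
{
	struct ica_z90_status stat;
	int fd;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0) {
		perror("open /dev/z90crypt");
		return 1;
	}
	if (ioctl(fd, ICAZ90STATUS, &stat) == 0)
		printf("total devices: %d, domain: %d\n",
		       stat.totalcount, stat.cryptoDomain);
	close(fd);
	return 0;
}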
87
88/**
89 * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
90 * PCIXCC_MCL3, CEX2C, or CEX2A
91 *
92 * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed
93 * Internal Code (LIC) (EC J12220 level 29).
94 * PCIXCC_MCL2 refers to any LIC before this level.
95 */
96#define ZCRYPT_PCICA 1
97#define ZCRYPT_PCICC 2
98#define ZCRYPT_PCIXCC_MCL2 3
99#define ZCRYPT_PCIXCC_MCL3 4
100#define ZCRYPT_CEX2C 5
101#define ZCRYPT_CEX2A 6
102
103struct zcrypt_device;
104
105struct zcrypt_ops {
106 long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
107 long (*rsa_modexpo_crt)(struct zcrypt_device *,
108 struct ica_rsa_modexpo_crt *);
109 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
110};
111
112struct zcrypt_device {
113 struct list_head list; /* Device list. */
114 spinlock_t lock; /* Per device lock. */
115 struct kref refcount; /* device refcounting */
116 struct ap_device *ap_dev; /* The "real" ap device. */
117 struct zcrypt_ops *ops; /* Crypto operations. */
118 int online; /* User online/offline */
119
120 int user_space_type; /* User space device id. */
121 char *type_string; /* User space device name. */
122 int min_mod_size; /* Min number of bits. */
123 int max_mod_size; /* Max number of bits. */
124 int short_crt; /* Card has crt length restriction. */
125 int speed_rating; /* Speed of the crypto device. */
126
127 int request_count; /* # current requests. */
128
129 struct ap_message reply; /* Per-device reply structure. */
130};
131
132struct zcrypt_device *zcrypt_device_alloc(size_t);
133void zcrypt_device_free(struct zcrypt_device *);
134void zcrypt_device_get(struct zcrypt_device *);
135int zcrypt_device_put(struct zcrypt_device *);
136int zcrypt_device_register(struct zcrypt_device *);
137void zcrypt_device_unregister(struct zcrypt_device *);
138int zcrypt_api_init(void);
139void zcrypt_api_exit(void);
140
141#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
new file mode 100644
index 000000000000..8dbcf0eef3e5
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -0,0 +1,350 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cca_key.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_CCA_KEY_H_
29#define _ZCRYPT_CCA_KEY_H_
30
31struct T6_keyBlock_hdr {
32 unsigned short blen;
33 unsigned short ulen;
34 unsigned short flags;
35};
36
37/**
38 * mapping for the cca private ME key token.
39 * Three parts of interest here: the header, the private section and
40 * the public section.
41 *
42 * mapping for the cca key token header
43 */
44struct cca_token_hdr {
45 unsigned char token_identifier;
46 unsigned char version;
47 unsigned short token_length;
48 unsigned char reserved[4];
49} __attribute__((packed));
50
51#define CCA_TKN_HDR_ID_EXT 0x1E
52
53/**
54 * mapping for the cca private ME section
55 */
56struct cca_private_ext_ME_sec {
57 unsigned char section_identifier;
58 unsigned char version;
59 unsigned short section_length;
60 unsigned char private_key_hash[20];
61 unsigned char reserved1[4];
62 unsigned char key_format;
63 unsigned char reserved2;
64 unsigned char key_name_hash[20];
65 unsigned char key_use_flags[4];
66 unsigned char reserved3[6];
67 unsigned char reserved4[24];
68 unsigned char confounder[24];
69 unsigned char exponent[128];
70 unsigned char modulus[128];
71} __attribute__((packed));
72
73#define CCA_PVT_USAGE_ALL 0x80
74
75/**
76 * mapping for the cca public section
77 * In a private key, the modulus doesn't appear in the public
78 * section. So, an arbitrary public exponent of 0x010001 will be
79 * used, so the section length is always 0x0F.
80 */
81struct cca_public_sec {
82 unsigned char section_identifier;
83 unsigned char version;
84 unsigned short section_length;
85 unsigned char reserved[2];
86 unsigned short exponent_len;
87 unsigned short modulus_bit_len;
88 unsigned short modulus_byte_len; /* In a private key, this is 0 */
89} __attribute__((packed));
90
91/**
92 * mapping for the cca private CRT key 'token'
93 * The first three parts (the only parts considered in this release)
94 * are: the header, the private section and the public section.
95 * The header and public section are the same as for the
96 * struct cca_private_ext_ME
97 *
98 * Following the structure are the quantities p, q, dp, dq, u, pad,
99 * and modulus, in that order, where pad_len is the modulo 8
100 * complement of the residue modulo 8 of the sum of
101 * (p_len + q_len + dp_len + dq_len + u_len).
102 */
103struct cca_pvt_ext_CRT_sec {
104 unsigned char section_identifier;
105 unsigned char version;
106 unsigned short section_length;
107 unsigned char private_key_hash[20];
108 unsigned char reserved1[4];
109 unsigned char key_format;
110 unsigned char reserved2;
111 unsigned char key_name_hash[20];
112 unsigned char key_use_flags[4];
113 unsigned short p_len;
114 unsigned short q_len;
115 unsigned short dp_len;
116 unsigned short dq_len;
117 unsigned short u_len;
118 unsigned short mod_len;
119 unsigned char reserved3[4];
120 unsigned short pad_len;
121 unsigned char reserved4[52];
122 unsigned char confounder[8];
123} __attribute__((packed));
124
125#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
126#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
127
128/**
129 * Set up private key fields of a type6 MEX message.
130 * Note that all numerics in the key token are big-endian,
131 * while the entries in the key block header are little-endian.
132 *
133 * @mex: pointer to user input data
134 * @p: pointer to memory area for the key
135 *
136 * Returns the size of the key area or -EFAULT
137 */
138static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
139 void *p, int big_endian)
140{
141 static struct cca_token_hdr static_pvt_me_hdr = {
142 .token_identifier = 0x1E,
143 .token_length = 0x0183,
144 };
145 static struct cca_private_ext_ME_sec static_pvt_me_sec = {
146 .section_identifier = 0x02,
147 .section_length = 0x016C,
148 .key_use_flags = {0x80,0x00,0x00,0x00},
149 };
150 static struct cca_public_sec static_pub_me_sec = {
151 .section_identifier = 0x04,
152 .section_length = 0x000F,
153 .exponent_len = 0x0003,
154 };
155 static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
156 struct {
157 struct T6_keyBlock_hdr t6_hdr;
158 struct cca_token_hdr pvtMeHdr;
159 struct cca_private_ext_ME_sec pvtMeSec;
160 struct cca_public_sec pubMeSec;
161 char exponent[3];
162 } __attribute__((packed)) *key = p;
163 unsigned char *temp;
164
165 memset(key, 0, sizeof(*key));
166
167 if (big_endian) {
168 key->t6_hdr.blen = cpu_to_be16(0x189);
169 key->t6_hdr.ulen = cpu_to_be16(0x189 - 2);
170 } else {
171 key->t6_hdr.blen = cpu_to_le16(0x189);
172 key->t6_hdr.ulen = cpu_to_le16(0x189 - 2);
173 }
174 key->pvtMeHdr = static_pvt_me_hdr;
175 key->pvtMeSec = static_pvt_me_sec;
176 key->pubMeSec = static_pub_me_sec;
177 /**
178 * In a private key, the modulus doesn't appear in the public
179 * section. So, an arbitrary public exponent of 0x010001 will be
180 * used.
181 */
182 memcpy(key->exponent, pk_exponent, 3);
183
184 /* key parameter block */
185 temp = key->pvtMeSec.exponent +
186 sizeof(key->pvtMeSec.exponent) - mex->inputdatalength;
187 if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
188 return -EFAULT;
189
190 /* modulus */
191 temp = key->pvtMeSec.modulus +
192 sizeof(key->pvtMeSec.modulus) - mex->inputdatalength;
193 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
194 return -EFAULT;
195 key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength;
196 return sizeof(*key);
197}
198
199/**
200 * Set up public key fields of a type6 MEX message. Leading zeroes
201 * are stripped from the b_key.
202 * Note that all numerics in the key token are big-endian,
203 * while the entries in the key block header are little-endian.
204 *
205 * @mex: pointer to user input data
206 * @p: pointer to memory area for the key
207 *
208 * Returns the size of the key area or -EFAULT
209 */
210static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
211 void *p, int big_endian)
212{
213 static struct cca_token_hdr static_pub_hdr = {
214 .token_identifier = 0x1E,
215 };
216 static struct cca_public_sec static_pub_sec = {
217 .section_identifier = 0x04,
218 };
219 struct {
220 struct T6_keyBlock_hdr t6_hdr;
221 struct cca_token_hdr pubHdr;
222 struct cca_public_sec pubSec;
223 char exponent[0];
224 } __attribute__((packed)) *key = p;
225 unsigned char *temp;
226 int i;
227
228 memset(key, 0, sizeof(*key));
229
230 key->pubHdr = static_pub_hdr;
231 key->pubSec = static_pub_sec;
232
233 /* key parameter block */
234 temp = key->exponent;
235 if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
236 return -EFAULT;
237 /* Strip leading zeroes from b_key. */
238 for (i = 0; i < mex->inputdatalength; i++)
239 if (temp[i])
240 break;
241 if (i >= mex->inputdatalength)
242 return -EINVAL;
243 memmove(temp, temp + i, mex->inputdatalength - i);
244 temp += mex->inputdatalength - i;
245 /* modulus */
246 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
247 return -EFAULT;
248
249 key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
250 key->pubSec.modulus_byte_len = mex->inputdatalength;
251 key->pubSec.exponent_len = mex->inputdatalength - i;
252 key->pubSec.section_length = sizeof(key->pubSec) +
253 2*mex->inputdatalength - i;
254 key->pubHdr.token_length =
255 key->pubSec.section_length + sizeof(key->pubHdr);
256 if (big_endian) {
257 key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4);
258 key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6);
259 } else {
260 key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4);
261 key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6);
262 }
263 return sizeof(*key) + 2*mex->inputdatalength - i;
264}
265
266/**
267 * Set up private key fields of a type6 CRT message.
268 * Note that all numerics in the key token are big-endian,
269 * while the entries in the key block header are little-endian.
270 *
271 * @crt: pointer to user input data
272 * @p: pointer to memory area for the key
273 *
274 * Returns the size of the key area or -EFAULT
275 */
276static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
277 void *p, int big_endian)
278{
279 static struct cca_public_sec static_cca_pub_sec = {
280 .section_identifier = 4,
281 .section_length = 0x000f,
282 .exponent_len = 0x0003,
283 };
284 static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
285 struct {
286 struct T6_keyBlock_hdr t6_hdr;
287 struct cca_token_hdr token;
288 struct cca_pvt_ext_CRT_sec pvt;
289 char key_parts[0];
290 } __attribute__((packed)) *key = p;
291 struct cca_public_sec *pub;
292 int short_len, long_len, pad_len, key_len, size;
293
294 memset(key, 0, sizeof(*key));
295
296 short_len = crt->inputdatalength / 2;
297 long_len = short_len + 8;
298 pad_len = -(3*long_len + 2*short_len) & 7;
299 key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
300 size = sizeof(*key) + key_len + sizeof(*pub) + 3;
301
302 /* parameter block.key block */
303 if (big_endian) {
304 key->t6_hdr.blen = cpu_to_be16(size);
305 key->t6_hdr.ulen = cpu_to_be16(size - 2);
306 } else {
307 key->t6_hdr.blen = cpu_to_le16(size);
308 key->t6_hdr.ulen = cpu_to_le16(size - 2);
309 }
310
311 /* key token header */
312 key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
313 key->token.token_length = size - 6;
314
315 /* private section */
316 key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
317 key->pvt.section_length = sizeof(key->pvt) + key_len;
318 key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
319 key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL;
320 key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len;
321 key->pvt.q_len = key->pvt.dq_len = short_len;
322 key->pvt.mod_len = crt->inputdatalength;
323 key->pvt.pad_len = pad_len;
324
325 /* key parts */
326 if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
327 copy_from_user(key->key_parts + long_len,
328 crt->nq_prime, short_len) ||
329 copy_from_user(key->key_parts + long_len + short_len,
330 crt->bp_key, long_len) ||
331 copy_from_user(key->key_parts + 2*long_len + short_len,
332 crt->bq_key, short_len) ||
333 copy_from_user(key->key_parts + 2*long_len + 2*short_len,
334 crt->u_mult_inv, long_len))
335 return -EFAULT;
336 memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
337 0xff, crt->inputdatalength);
338 pub = (struct cca_public_sec *)(key->key_parts + key_len);
339 *pub = static_cca_pub_sec;
340 pub->modulus_bit_len = 8 * crt->inputdatalength;
341 /**
342 * In a private key, the modulus doesn't appear in the public
343 * section. So, an arbitrary public exponent of 0x010001 will be
344 * used.
345 */
346 memcpy((char *) (pub + 1), pk_exponent, 3);
347 return size;
348}
349
350#endif /* _ZCRYPT_CCA_KEY_H_ */
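The length arithmetic in zcrypt_type6_crt_key() follows the pad_len rule given for struct cca_pvt_ext_CRT_sec. A small worked example, assuming a 2048-bit key (inputdatalength = 256 bytes):

#include <stdio.h>

int main(void)
{
	int inputdatalength = 256;			/* 2048-bit modulus */
	int short_len = inputdatalength / 2;		/* 128: q, dq */
	int long_len = short_len + 8;			/* 136: p, dp, u */
	int pad_len = -(3 * long_len + 2 * short_len) & 7;
	int key_len = 3 * long_len + 2 * short_len + pad_len + inputdatalength;

	/* Prints: short=128 long=136 pad=0 key_len=920 */
	printf("short=%d long=%d pad=%d key_len=%d\n",
	       short_len, long_len, pad_len, key_len);
	return 0;
}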
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
new file mode 100644
index 000000000000..a62b00083d0c
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -0,0 +1,435 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cex2a.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_cex2a.h"
39
40#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
41#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
42
43#define CEX2A_SPEED_RATING 970
44
45#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
46#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
47
48#define CEX2A_CLEANUP_TIME (15*HZ)
49
50static struct ap_device_id zcrypt_cex2a_ids[] = {
51 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
52 { /* end of list */ },
53};
54
55#ifndef CONFIG_ZCRYPT_MONOLITHIC
56MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
57MODULE_AUTHOR("IBM Corporation");
58MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
59 "Copyright 2001, 2006 IBM Corporation");
60MODULE_LICENSE("GPL");
61#endif
62
63static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
64static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
65static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
66 struct ap_message *);
67
68static struct ap_driver zcrypt_cex2a_driver = {
69 .probe = zcrypt_cex2a_probe,
70 .remove = zcrypt_cex2a_remove,
71 .receive = zcrypt_cex2a_receive,
72 .ids = zcrypt_cex2a_ids,
73};
74
75/**
76 * Convert an ICAMEX message to a type50 MEX message.
77 *
78 * @zdev: crypto device pointer
79 * @ap_msg: crypto request pointer
80 * @mex: pointer to user input data
81 *
82 * Returns 0 on success or -EFAULT.
83 */
84static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
85 struct ap_message *ap_msg,
86 struct ica_rsa_modexpo *mex)
87{
88 unsigned char *mod, *exp, *inp;
89 int mod_len;
90
91 mod_len = mex->inputdatalength;
92
93 if (mod_len <= 128) {
94 struct type50_meb1_msg *meb1 = ap_msg->message;
95 memset(meb1, 0, sizeof(*meb1));
96 ap_msg->length = sizeof(*meb1);
97 meb1->header.msg_type_code = TYPE50_TYPE_CODE;
98 meb1->header.msg_len = sizeof(*meb1);
99 meb1->keyblock_type = TYPE50_MEB1_FMT;
100 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
101 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
102 inp = meb1->message + sizeof(meb1->message) - mod_len;
103 } else {
104 struct type50_meb2_msg *meb2 = ap_msg->message;
105 memset(meb2, 0, sizeof(*meb2));
106 ap_msg->length = sizeof(*meb2);
107 meb2->header.msg_type_code = TYPE50_TYPE_CODE;
108 meb2->header.msg_len = sizeof(*meb2);
109 meb2->keyblock_type = TYPE50_MEB2_FMT;
110 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
111 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
112 inp = meb2->message + sizeof(meb2->message) - mod_len;
113 }
114
115 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
116 copy_from_user(exp, mex->b_key, mod_len) ||
117 copy_from_user(inp, mex->inputdata, mod_len))
118 return -EFAULT;
119 return 0;
120}
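The pointer arithmetic above implements the right-justification convention documented in zcrypt_cex2a.h: an operand shorter than its field is copied to the field's tail so the leading bytes stay zero. A tiny stand-alone illustration of the idiom (field and operand sizes are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char modulus[16] = { 0 };	/* zero-filled field */
	unsigned char operand[4] = { 0xde, 0xad, 0xbe, 0xef };
	size_t mod_len = sizeof(operand);
	size_t i;

	/* Right-justify: copy into the last mod_len bytes of the field. */
	memcpy(modulus + sizeof(modulus) - mod_len, operand, mod_len);

	for (i = 0; i < sizeof(modulus); i++)
		printf("%02x", modulus[i]);
	printf("\n");	/* 000000000000000000000000deadbeef */
	return 0;
}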
121
122/**
123 * Convert an ICACRT message to a type50 CRT message.
124 *
125 * @zdev: crypto device pointer
126 * @ap_msg: crypto request pointer
127 * @crt: pointer to user input data
128 *
129 * Returns 0 on success or -EFAULT.
130 */
131static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
132 struct ap_message *ap_msg,
133 struct ica_rsa_modexpo_crt *crt)
134{
135 int mod_len, short_len, long_len, long_offset;
136 unsigned char *p, *q, *dp, *dq, *u, *inp;
137
138 mod_len = crt->inputdatalength;
139 short_len = mod_len / 2;
140 long_len = mod_len / 2 + 8;
141
142 /*
143 * CEX2A cannot handle p, dp, or U > 128 bytes.
144 * If we have one of these, we need to do extra checking.
145 */
146 if (long_len > 128) {
147 /*
148 * zcrypt_rsa_crt already checked for the leading
149 * zeroes of np_prime, bp_key and u_mult_inv.
150 */
151 long_offset = long_len - 128;
152 long_len = 128;
153 } else
154 long_offset = 0;
155
156 /*
157 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
158 * the larger message structure.
159 */
160 if (long_len <= 64) {
161 struct type50_crb1_msg *crb1 = ap_msg->message;
162 memset(crb1, 0, sizeof(*crb1));
163 ap_msg->length = sizeof(*crb1);
164 crb1->header.msg_type_code = TYPE50_TYPE_CODE;
165 crb1->header.msg_len = sizeof(*crb1);
166 crb1->keyblock_type = TYPE50_CRB1_FMT;
167 p = crb1->p + sizeof(crb1->p) - long_len;
168 q = crb1->q + sizeof(crb1->q) - short_len;
169 dp = crb1->dp + sizeof(crb1->dp) - long_len;
170 dq = crb1->dq + sizeof(crb1->dq) - short_len;
171 u = crb1->u + sizeof(crb1->u) - long_len;
172 inp = crb1->message + sizeof(crb1->message) - mod_len;
173 } else {
174 struct type50_crb2_msg *crb2 = ap_msg->message;
175 memset(crb2, 0, sizeof(*crb2));
176 ap_msg->length = sizeof(*crb2);
177 crb2->header.msg_type_code = TYPE50_TYPE_CODE;
178 crb2->header.msg_len = sizeof(*crb2);
179 crb2->keyblock_type = TYPE50_CRB2_FMT;
180 p = crb2->p + sizeof(crb2->p) - long_len;
181 q = crb2->q + sizeof(crb2->q) - short_len;
182 dp = crb2->dp + sizeof(crb2->dp) - long_len;
183 dq = crb2->dq + sizeof(crb2->dq) - short_len;
184 u = crb2->u + sizeof(crb2->u) - long_len;
185 inp = crb2->message + sizeof(crb2->message) - mod_len;
186 }
187
188 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
189 copy_from_user(q, crt->nq_prime, short_len) ||
190 copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
191 copy_from_user(dq, crt->bq_key, short_len) ||
192 copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
193 copy_from_user(inp, crt->inputdata, mod_len))
194 return -EFAULT;
195
196
197 return 0;
198}
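The long_offset handling above clamps the long CRT quantities to the 128-byte fields of struct type50_crb2_msg. A worked example, assuming a 2048-bit CRT request (inputdatalength = 256):

#include <stdio.h>

int main(void)
{
	int inputdatalength = 256;		/* 2048-bit modulus */
	int short_len = inputdatalength / 2;	/* 128 */
	int long_len = short_len + 8;		/* 136, too big for the card */
	int long_offset = 0;

	if (long_len > 128) {
		/* Drop the leading 8 bytes, which the caller has already
		 * verified to be zero, and copy only the last 128. */
		long_offset = long_len - 128;
		long_len = 128;
	}
	/* Prints: long_offset=8 long_len=128 short_len=128 */
	printf("long_offset=%d long_len=%d short_len=%d\n",
	       long_offset, long_len, short_len);
	return 0;
}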
199
200/**
201 * Copy results from a type 80 reply message back to user space.
202 *
203 * @zdev: crypto device pointer
204 * @reply: reply AP message.
205 * @outputdata: pointer to user output data
206 * @outputdatalength: size of user output data
207 *
208 * Returns 0 on success or -EFAULT.
209 */
210static int convert_type80(struct zcrypt_device *zdev,
211 struct ap_message *reply,
212 char __user *outputdata,
213 unsigned int outputdatalength)
214{
215 struct type80_hdr *t80h = reply->message;
216 unsigned char *data;
217
218 if (t80h->len < sizeof(*t80h) + outputdatalength) {
219 /* The result is too short, the CEX2A card may not do that. */
220 zdev->online = 0;
221 return -EAGAIN; /* repeat the request on a different device. */
222 }
223 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
224 data = reply->message + t80h->len - outputdatalength;
225 if (copy_to_user(outputdata, data, outputdatalength))
226 return -EFAULT;
227 return 0;
228}
229
230static int convert_response(struct zcrypt_device *zdev,
231 struct ap_message *reply,
232 char __user *outputdata,
233 unsigned int outputdatalength)
234{
235 /* Response type byte is the second byte in the response. */
236 switch (((unsigned char *) reply->message)[1]) {
237 case TYPE82_RSP_CODE:
238 case TYPE88_RSP_CODE:
239 return convert_error(zdev, reply);
240 case TYPE80_RSP_CODE:
241 return convert_type80(zdev, reply,
242 outputdata, outputdatalength);
243 default: /* Unknown response type, this should NEVER EVER happen */
244 PRINTK("Unrecognized Message Header: %08x%08x\n",
245 *(unsigned int *) reply->message,
246 *(unsigned int *) (reply->message+4));
247 zdev->online = 0;
248 return -EAGAIN; /* repeat the request on a different device. */
249 }
250}
251
252/**
253 * This function is called from the AP bus code after a crypto request
254 * "msg" has finished with the reply message "reply".
255 * It is called from tasklet context.
256 * @ap_dev: pointer to the AP device
257 * @msg: pointer to the AP message
258 * @reply: pointer to the AP reply message
259 */
260static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
261 struct ap_message *msg,
262 struct ap_message *reply)
263{
264 static struct error_hdr error_reply = {
265 .type = TYPE82_RSP_CODE,
266 .reply_code = REP82_ERROR_MACHINE_FAILURE,
267 };
268 struct type80_hdr *t80h = reply->message;
269 int length;
270
271 /* Copy the reply message to the request message buffer. */
272 if (IS_ERR(reply))
273 memcpy(msg->message, &error_reply, sizeof(error_reply));
274 else if (t80h->type == TYPE80_RSP_CODE) {
275 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
276 memcpy(msg->message, reply->message, length);
277 } else
278 memcpy(msg->message, reply->message, sizeof error_reply);
279 complete((struct completion *) msg->private);
280}
281
282static atomic_t zcrypt_step = ATOMIC_INIT(0);
283
284/**
285 * The request distributor calls this function if it picked the CEX2A
286 * device to handle a modexpo request.
287 * @zdev: pointer to zcrypt_device structure that identifies the
288 * CEX2A device to the request distributor
289 * @mex: pointer to the modexpo request buffer
290 */
291static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
292 struct ica_rsa_modexpo *mex)
293{
294 struct ap_message ap_msg;
295 struct completion work;
296 int rc;
297
298 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
299 if (!ap_msg.message)
300 return -ENOMEM;
301 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
302 atomic_inc_return(&zcrypt_step);
303 ap_msg.private = &work;
304 rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
305 if (rc)
306 goto out_free;
307 init_completion(&work);
308 ap_queue_message(zdev->ap_dev, &ap_msg);
309 rc = wait_for_completion_interruptible_timeout(
310 &work, CEX2A_CLEANUP_TIME);
311 if (rc > 0)
312 rc = convert_response(zdev, &ap_msg, mex->outputdata,
313 mex->outputdatalength);
314 else {
315 /* Signal pending or message timed out. */
316 ap_cancel_message(zdev->ap_dev, &ap_msg);
317 if (rc == 0)
318 /* Message timed out. */
319 rc = -ETIME;
320 }
321out_free:
322 kfree(ap_msg.message);
323 return rc;
324}
325
326/**
327 * The request distributor calls this function if it picked the CEX2A
328 * device to handle a modexpo_crt request.
329 * @zdev: pointer to zcrypt_device structure that identifies the
330 * CEX2A device to the request distributor
331 * @crt: pointer to the modexpo_crt request buffer
332 */
333static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
334 struct ica_rsa_modexpo_crt *crt)
335{
336 struct ap_message ap_msg;
337 struct completion work;
338 int rc;
339
340 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
341 if (!ap_msg.message)
342 return -ENOMEM;
343 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
344 atomic_inc_return(&zcrypt_step);
345 ap_msg.private = &work;
346 rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
347 if (rc)
348 goto out_free;
349 init_completion(&work);
350 ap_queue_message(zdev->ap_dev, &ap_msg);
351 rc = wait_for_completion_interruptible_timeout(
352 &work, CEX2A_CLEANUP_TIME);
353 if (rc > 0)
354 rc = convert_response(zdev, &ap_msg, crt->outputdata,
355 crt->outputdatalength);
356 else {
357 /* Signal pending or message timed out. */
358 ap_cancel_message(zdev->ap_dev, &ap_msg);
359 if (rc == 0)
360 /* Message timed out. */
361 rc = -ETIME;
362 }
363out_free:
364 kfree(ap_msg.message);
365 return rc;
366}
367
368/**
369 * The crypto operations for a CEX2A card.
370 */
371static struct zcrypt_ops zcrypt_cex2a_ops = {
372 .rsa_modexpo = zcrypt_cex2a_modexpo,
373 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
374};
375
376/**
377 * Probe function for CEX2A cards. It always accepts the AP device
378 * since the bus_match already checked the hardware type.
379 * @ap_dev: pointer to the AP device.
380 */
381static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
382{
383 struct zcrypt_device *zdev;
384 int rc;
385
386 zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
387 if (!zdev)
388 return -ENOMEM;
389 zdev->ap_dev = ap_dev;
390 zdev->ops = &zcrypt_cex2a_ops;
391 zdev->online = 1;
392 zdev->user_space_type = ZCRYPT_CEX2A;
393 zdev->type_string = "CEX2A";
394 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
395 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
396 zdev->short_crt = 1;
397 zdev->speed_rating = CEX2A_SPEED_RATING;
398 ap_dev->reply = &zdev->reply;
399 ap_dev->private = zdev;
400 rc = zcrypt_device_register(zdev);
401 if (rc)
402 goto out_free;
403 return 0;
404
405out_free:
406 ap_dev->private = NULL;
407 zcrypt_device_free(zdev);
408 return rc;
409}
410
411/**
412 * This is called to remove the extended CEX2A driver information
413 * if an AP device is removed.
414 */
415static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
416{
417 struct zcrypt_device *zdev = ap_dev->private;
418
419 zcrypt_device_unregister(zdev);
420}
421
422int __init zcrypt_cex2a_init(void)
423{
424 return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
425}
426
427void __exit zcrypt_cex2a_exit(void)
428{
429 ap_driver_unregister(&zcrypt_cex2a_driver);
430}
431
432#ifndef CONFIG_ZCRYPT_MONOLITHIC
433module_init(zcrypt_cex2a_init);
434module_exit(zcrypt_cex2a_exit);
435#endif
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
new file mode 100644
index 000000000000..8f69d1dacab8
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -0,0 +1,126 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cex2a.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_CEX2A_H_
29#define _ZCRYPT_CEX2A_H_
30
31/**
32 * The type 50 message family is associated with a CEX2A card.
33 *
34 * The four members of the family are described below.
35 *
36 * Note that all unsigned char arrays are right-justified and left-padded
37 * with zeroes.
38 *
39 * Note that all reserved fields must be zeroes.
40 */
41struct type50_hdr {
42 unsigned char reserved1;
43 unsigned char msg_type_code; /* 0x50 */
44 unsigned short msg_len;
45 unsigned char reserved2;
46 unsigned char ignored;
47 unsigned short reserved3;
48} __attribute__((packed));
49
50#define TYPE50_TYPE_CODE 0x50
51
52#define TYPE50_MEB1_FMT 0x0001
53#define TYPE50_MEB2_FMT 0x0002
54#define TYPE50_CRB1_FMT 0x0011
55#define TYPE50_CRB2_FMT 0x0012
56
57/* Mod-Exp, with a small modulus */
58struct type50_meb1_msg {
59 struct type50_hdr header;
60 unsigned short keyblock_type; /* 0x0001 */
61 unsigned char reserved[6];
62 unsigned char exponent[128];
63 unsigned char modulus[128];
64 unsigned char message[128];
65} __attribute__((packed));
66
67/* Mod-Exp, with a large modulus */
68struct type50_meb2_msg {
69 struct type50_hdr header;
70 unsigned short keyblock_type; /* 0x0002 */
71 unsigned char reserved[6];
72 unsigned char exponent[256];
73 unsigned char modulus[256];
74 unsigned char message[256];
75} __attribute__((packed));
76
77/* CRT, with a small modulus */
78struct type50_crb1_msg {
79 struct type50_hdr header;
80 unsigned short keyblock_type; /* 0x0011 */
81 unsigned char reserved[6];
82 unsigned char p[64];
83 unsigned char q[64];
84 unsigned char dp[64];
85 unsigned char dq[64];
86 unsigned char u[64];
87 unsigned char message[128];
88} __attribute__((packed));
89
90/* CRT, with a large modulus */
91struct type50_crb2_msg {
92 struct type50_hdr header;
93 unsigned short keyblock_type; /* 0x0012 */
94 unsigned char reserved[6];
95 unsigned char p[128];
96 unsigned char q[128];
97 unsigned char dp[128];
98 unsigned char dq[128];
99 unsigned char u[128];
100 unsigned char message[256];
101} __attribute__((packed));
102
103/**
104 * The type 80 response family is associated with a CEX2A card.
105 *
106 * Note that all unsigned char arrays are right-justified and left-padded
107 * with zeroes.
108 *
109 * Note that all reserved fields must be zeroes.
110 */
111
112#define TYPE80_RSP_CODE 0x80
113
114struct type80_hdr {
115 unsigned char reserved1;
116 unsigned char type; /* 0x80 */
117 unsigned short len;
118 unsigned char code; /* 0x00 */
119 unsigned char reserved2[3];
120 unsigned char reserved3[8];
121} __attribute__((packed));
122
123int zcrypt_cex2a_init(void);
124void zcrypt_cex2a_exit(void);
125
126#endif /* _ZCRYPT_CEX2A_H_ */
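The CEX2A_MAX_MESSAGE_SIZE and CEX2A_MAX_RESPONSE_SIZE comments in zcrypt_cex2a.c correspond to these packed layouts: 0x390 is sizeof(struct type50_crb2_msg), and 0x110 is sizeof(struct type80_hdr) plus the 256-byte maximum result. A compile-time sketch checking that, assuming this header is on the include path and a C11 compiler:

#include "zcrypt_cex2a.h"

/* Sanity checks mirroring the size comments in zcrypt_cex2a.c. */
_Static_assert(sizeof(struct type50_crb2_msg) == 0x390,
	       "largest CEX2A request is 0x390 bytes");
_Static_assert(sizeof(struct type80_hdr) + 256 == 0x110,
	       "largest CEX2A reply is the header plus a 256-byte result");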
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
new file mode 100644
index 000000000000..2cb616ba8bec
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -0,0 +1,133 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_error.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_ERROR_H_
29#define _ZCRYPT_ERROR_H_
30
31#include "zcrypt_api.h"
32
33/**
34 * Reply Messages
35 *
36 * Error reply messages are of two types:
37 * 82: Error (see below)
38 * 88: Error (see below)
39 * Both type 82 and type 88 have the same structure in the header.
40 *
41 * Request reply messages are of three known types:
42 * 80: Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
43 * 84: Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
44 * 86: Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
45 *
46 */
47struct error_hdr {
48 unsigned char reserved1; /* 0x00 */
49 unsigned char type; /* 0x82 or 0x88 */
50 unsigned char reserved2[2]; /* 0x0000 */
51 unsigned char reply_code; /* reply code */
52 unsigned char reserved3[3]; /* 0x000000 */
53};
54
55#define TYPE82_RSP_CODE 0x82
56#define TYPE88_RSP_CODE 0x88
57
58#define REP82_ERROR_MACHINE_FAILURE 0x10
59#define REP82_ERROR_PREEMPT_FAILURE 0x12
60#define REP82_ERROR_CHECKPT_FAILURE 0x14
61#define REP82_ERROR_MESSAGE_TYPE 0x20
62#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
63#define REP82_ERROR_INVALID_MSG_LEN 0x23
64#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
65#define REP82_ERROR_FORMAT_FIELD 0x29
66#define REP82_ERROR_INVALID_COMMAND 0x30
67#define REP82_ERROR_MALFORMED_MSG 0x40
68#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
69#define REP82_ERROR_WORD_ALIGNMENT 0x60
70#define REP82_ERROR_MESSAGE_LENGTH 0x80
71#define REP82_ERROR_OPERAND_INVALID 0x82
72#define REP82_ERROR_OPERAND_SIZE 0x84
73#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
74#define REP82_ERROR_RESERVED_FIELD 0x88
75#define REP82_ERROR_TRANSPORT_FAIL 0x90
76#define REP82_ERROR_PACKET_TRUNCATED 0xA0
77#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
78
79#define REP88_ERROR_MODULE_FAILURE 0x10
80
81#define REP88_ERROR_MESSAGE_TYPE 0x20
82#define REP88_ERROR_MESSAGE_MALFORMD 0x22
83#define REP88_ERROR_MESSAGE_LENGTH 0x23
84#define REP88_ERROR_RESERVED_FIELD 0x24
85#define REP88_ERROR_KEY_TYPE 0x34
86#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
87#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
88#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
89
90static inline int convert_error(struct zcrypt_device *zdev,
91 struct ap_message *reply)
92{
93 struct error_hdr *ehdr = reply->message;
94
95 PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n",
96 ehdr->type, *(unsigned int *) reply->message,
97 *(unsigned int *) (reply->message + 4));
98
99 switch (ehdr->reply_code) {
100 case REP82_ERROR_OPERAND_INVALID:
101 case REP82_ERROR_OPERAND_SIZE:
102 case REP82_ERROR_EVEN_MOD_IN_OPND:
103 case REP88_ERROR_MESSAGE_MALFORMD:
104 // REP88_ERROR_INVALID_KEY // '82' CEX2A
105 // REP88_ERROR_OPERAND // '84' CEX2A
106 // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
107 /* Invalid input data. */
108 return -EINVAL;
109 case REP82_ERROR_MESSAGE_TYPE:
110 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
111 /**
112 * To send a message of the wrong type is a bug in the
113 * device driver. Warn about it, disable the device
114 * and then repeat the request.
115 */
116 WARN_ON(1);
117 zdev->online = 0;
118 return -EAGAIN;
119 case REP82_ERROR_TRANSPORT_FAIL:
120 case REP82_ERROR_MACHINE_FAILURE:
121 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
122 /* If a card fails, disable it and repeat the request. */
123 zdev->online = 0;
124 return -EAGAIN;
125 default:
126 PRINTKW("unknown type %02x reply code = %d\n",
127 ehdr->type, ehdr->reply_code);
128 zdev->online = 0;
129 return -EAGAIN; /* repeat the request on a different device. */
130 }
131}
132
133#endif /* _ZCRYPT_ERROR_H_ */
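convert_response() in the card drivers dispatches on the second byte of the raw reply, and error replies carry the code field laid out in struct error_hdr above. A user-space style sketch that decodes a reply buffer the same way; the sample bytes are invented for illustration:

#include <stdio.h>

/* Layout matching struct error_hdr above. */
struct error_hdr_sketch {
	unsigned char reserved1;
	unsigned char type;		/* 0x82 or 0x88 */
	unsigned char reserved2[2];
	unsigned char reply_code;
	unsigned char reserved3[3];
};

int main(void)
{
	/* Hypothetical 8-byte reply header: type 0x82, reply code 0x84. */
	unsigned char reply[8] = { 0x00, 0x82, 0x00, 0x00, 0x84, 0, 0, 0 };
	const struct error_hdr_sketch *ehdr = (const void *) reply;

	switch (reply[1]) {		/* second byte selects the family */
	case 0x82:
	case 0x88:
		printf("error reply, code 0x%02x (REP82_ERROR_OPERAND_SIZE)\n",
		       ehdr->reply_code);
		break;
	case 0x80:
		printf("type 80 result from a CEX2A\n");
		break;
	default:
		printf("unknown reply type 0x%02x\n", reply[1]);
	}
	return 0;
}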
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
new file mode 100644
index 000000000000..2a9349ad68b7
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -0,0 +1,100 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_mono.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/miscdevice.h>
32#include <linux/fs.h>
33#include <linux/proc_fs.h>
34#include <linux/compat.h>
35#include <asm/atomic.h>
36#include <asm/uaccess.h>
37
38#include "ap_bus.h"
39#include "zcrypt_api.h"
40#include "zcrypt_pcica.h"
41#include "zcrypt_pcicc.h"
42#include "zcrypt_pcixcc.h"
43#include "zcrypt_cex2a.h"
44
45/**
46 * The module initialization code.
47 */
48int __init zcrypt_init(void)
49{
50 int rc;
51
52 rc = ap_module_init();
53 if (rc)
54 goto out;
55 rc = zcrypt_api_init();
56 if (rc)
57 goto out_ap;
58 rc = zcrypt_pcica_init();
59 if (rc)
60 goto out_api;
61 rc = zcrypt_pcicc_init();
62 if (rc)
63 goto out_pcica;
64 rc = zcrypt_pcixcc_init();
65 if (rc)
66 goto out_pcicc;
67 rc = zcrypt_cex2a_init();
68 if (rc)
69 goto out_pcixcc;
70 return 0;
71
72out_pcixcc:
73 zcrypt_pcixcc_exit();
74out_pcicc:
75 zcrypt_pcicc_exit();
76out_pcica:
77 zcrypt_pcica_exit();
78out_api:
79 zcrypt_api_exit();
80out_ap:
81 ap_module_exit();
82out:
83 return rc;
84}
85
86/**
87 * The module termination code.
88 */
89void __exit zcrypt_exit(void)
90{
91 zcrypt_cex2a_exit();
92 zcrypt_pcixcc_exit();
93 zcrypt_pcicc_exit();
94 zcrypt_pcica_exit();
95 zcrypt_api_exit();
96 ap_module_exit();
97}
98
99module_init(zcrypt_init);
100module_exit(zcrypt_exit);
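zcrypt_init() above uses the usual staged-initialization pattern: every sub-init that fails jumps to a label that unwinds only the stages already brought up, in reverse order. A generic stand-alone sketch of the same idiom with made-up stage names:

#include <stdio.h>

/* Hypothetical stages standing in for ap_module_init(), zcrypt_api_init(), ... */
static int stage_a_init(void) { puts("a up"); return 0; }
static void stage_a_exit(void) { puts("a down"); }
static int stage_b_init(void) { puts("b up"); return 0; }
static void stage_b_exit(void) { puts("b down"); }
static int stage_c_init(void) { puts("c up"); return -1; /* simulate failure */ }

static int init_all(void)
{
	int rc;

	rc = stage_a_init();
	if (rc)
		goto out;
	rc = stage_b_init();
	if (rc)
		goto out_a;
	rc = stage_c_init();
	if (rc)
		goto out_b;
	return 0;

out_b:
	stage_b_exit();		/* undo in reverse order of setup */
out_a:
	stage_a_exit();
out:
	return rc;
}

int main(void)
{
	return init_all() ? 1 : 0;
}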
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
new file mode 100644
index 000000000000..b6a4ecdc8025
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -0,0 +1,418 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcica.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_pcica.h"
39
40#define PCICA_MIN_MOD_SIZE 1 /* 8 bits */
41#define PCICA_MAX_MOD_SIZE 256 /* 2048 bits */
42
43#define PCICA_SPEED_RATING 2800
44
45#define PCICA_MAX_MESSAGE_SIZE 0x3a0 /* sizeof(struct type4_lcr) */
46#define PCICA_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
47
48#define PCICA_CLEANUP_TIME (15*HZ)
49
50static struct ap_device_id zcrypt_pcica_ids[] = {
51 { AP_DEVICE(AP_DEVICE_TYPE_PCICA) },
52 { /* end of list */ },
53};
54
55#ifndef CONFIG_ZCRYPT_MONOLITHIC
56MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
57MODULE_AUTHOR("IBM Corporation");
58MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
59 "Copyright 2001, 2006 IBM Corporation");
60MODULE_LICENSE("GPL");
61#endif
62
63static int zcrypt_pcica_probe(struct ap_device *ap_dev);
64static void zcrypt_pcica_remove(struct ap_device *ap_dev);
65static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *,
66 struct ap_message *);
67
68static struct ap_driver zcrypt_pcica_driver = {
69 .probe = zcrypt_pcica_probe,
70 .remove = zcrypt_pcica_remove,
71 .receive = zcrypt_pcica_receive,
72 .ids = zcrypt_pcica_ids,
73};
74
75/**
76 * Convert an ICAMEX message to a type4 MEX message.
77 *
78 * @zdev: crypto device pointer
79 * @ap_msg: crypto request pointer
80 * @mex: pointer to user input data
81 *
82 * Returns 0 on success or -EFAULT.
83 */
84static int ICAMEX_msg_to_type4MEX_msg(struct zcrypt_device *zdev,
85 struct ap_message *ap_msg,
86 struct ica_rsa_modexpo *mex)
87{
88 unsigned char *modulus, *exponent, *message;
89 int mod_len;
90
91 mod_len = mex->inputdatalength;
92
93 if (mod_len <= 128) {
94 struct type4_sme *sme = ap_msg->message;
95 memset(sme, 0, sizeof(*sme));
96 ap_msg->length = sizeof(*sme);
97 sme->header.msg_fmt = TYPE4_SME_FMT;
98 sme->header.msg_len = sizeof(*sme);
99 sme->header.msg_type_code = TYPE4_TYPE_CODE;
100 sme->header.request_code = TYPE4_REQU_CODE;
101 modulus = sme->modulus + sizeof(sme->modulus) - mod_len;
102 exponent = sme->exponent + sizeof(sme->exponent) - mod_len;
103 message = sme->message + sizeof(sme->message) - mod_len;
104 } else {
105 struct type4_lme *lme = ap_msg->message;
106 memset(lme, 0, sizeof(*lme));
107 ap_msg->length = sizeof(*lme);
108 lme->header.msg_fmt = TYPE4_LME_FMT;
109 lme->header.msg_len = sizeof(*lme);
110 lme->header.msg_type_code = TYPE4_TYPE_CODE;
111 lme->header.request_code = TYPE4_REQU_CODE;
112 modulus = lme->modulus + sizeof(lme->modulus) - mod_len;
113 exponent = lme->exponent + sizeof(lme->exponent) - mod_len;
114 message = lme->message + sizeof(lme->message) - mod_len;
115 }
116
117 if (copy_from_user(modulus, mex->n_modulus, mod_len) ||
118 copy_from_user(exponent, mex->b_key, mod_len) ||
119 copy_from_user(message, mex->inputdata, mod_len))
120 return -EFAULT;
121 return 0;
122}
123
124/**
125 * Convert an ICACRT message to a type4 CRT message.
126 *
127 * @zdev: crypto device pointer
128 * @ap_msg: crypto request pointer
129 * @crt: pointer to user input data
130 *
131 * Returns 0 on success or -EFAULT.
132 */
133static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
134 struct ap_message *ap_msg,
135 struct ica_rsa_modexpo_crt *crt)
136{
137 unsigned char *p, *q, *dp, *dq, *u, *inp;
138 int mod_len, short_len, long_len;
139
140 mod_len = crt->inputdatalength;
141 short_len = mod_len / 2;
142 long_len = mod_len / 2 + 8;
143
144 if (mod_len <= 128) {
145 struct type4_scr *scr = ap_msg->message;
146 memset(scr, 0, sizeof(*scr));
147 ap_msg->length = sizeof(*scr);
148 scr->header.msg_type_code = TYPE4_TYPE_CODE;
149 scr->header.request_code = TYPE4_REQU_CODE;
150 scr->header.msg_fmt = TYPE4_SCR_FMT;
151 scr->header.msg_len = sizeof(*scr);
152 p = scr->p + sizeof(scr->p) - long_len;
153 q = scr->q + sizeof(scr->q) - short_len;
154 dp = scr->dp + sizeof(scr->dp) - long_len;
155 dq = scr->dq + sizeof(scr->dq) - short_len;
156 u = scr->u + sizeof(scr->u) - long_len;
157 inp = scr->message + sizeof(scr->message) - mod_len;
158 } else {
159 struct type4_lcr *lcr = ap_msg->message;
160 memset(lcr, 0, sizeof(*lcr));
161 ap_msg->length = sizeof(*lcr);
162 lcr->header.msg_type_code = TYPE4_TYPE_CODE;
163 lcr->header.request_code = TYPE4_REQU_CODE;
164 lcr->header.msg_fmt = TYPE4_LCR_FMT;
165 lcr->header.msg_len = sizeof(*lcr);
166 p = lcr->p + sizeof(lcr->p) - long_len;
167 q = lcr->q + sizeof(lcr->q) - short_len;
168 dp = lcr->dp + sizeof(lcr->dp) - long_len;
169 dq = lcr->dq + sizeof(lcr->dq) - short_len;
170 u = lcr->u + sizeof(lcr->u) - long_len;
171 inp = lcr->message + sizeof(lcr->message) - mod_len;
172 }
173
174 if (copy_from_user(p, crt->np_prime, long_len) ||
175 copy_from_user(q, crt->nq_prime, short_len) ||
176 copy_from_user(dp, crt->bp_key, long_len) ||
177 copy_from_user(dq, crt->bq_key, short_len) ||
178 copy_from_user(u, crt->u_mult_inv, long_len) ||
179 copy_from_user(inp, crt->inputdata, mod_len))
180 return -EFAULT;
181 return 0;
182}
183
184/**
185 * Copy results from a type 84 reply message back to user space.
186 *
187 * @zdev: crypto device pointer
188 * @reply: reply AP message.
189 * @outputdata: pointer to user output data
190 * @outputdatalength: size of user output data
191 *
192 * Returns 0 on success or -EFAULT.
193 */
194static inline int convert_type84(struct zcrypt_device *zdev,
195 struct ap_message *reply,
196 char __user *outputdata,
197 unsigned int outputdatalength)
198{
199 struct type84_hdr *t84h = reply->message;
200 char *data;
201
202 if (t84h->len < sizeof(*t84h) + outputdatalength) {
203 /* The result is too short, the PCICA card may not do that. */
204 zdev->online = 0;
205 return -EAGAIN; /* repeat the request on a different device. */
206 }
207 BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
208 data = reply->message + t84h->len - outputdatalength;
209 if (copy_to_user(outputdata, data, outputdatalength))
210 return -EFAULT;
211 return 0;
212}
213
214static int convert_response(struct zcrypt_device *zdev,
215 struct ap_message *reply,
216 char __user *outputdata,
217 unsigned int outputdatalength)
218{
219 /* Response type byte is the second byte in the response. */
220 switch (((unsigned char *) reply->message)[1]) {
221 case TYPE82_RSP_CODE:
222 case TYPE88_RSP_CODE:
223 return convert_error(zdev, reply);
224 case TYPE84_RSP_CODE:
225 return convert_type84(zdev, reply,
226 outputdata, outputdatalength);
227 default: /* Unknown response type, this should NEVER EVER happen */
228 PRINTK("Unrecognized Message Header: %08x%08x\n",
229 *(unsigned int *) reply->message,
230 *(unsigned int *) (reply->message+4));
231 zdev->online = 0;
232 return -EAGAIN; /* repeat the request on a different device. */
233 }
234}
235
236/**
237 * This function is called from the AP bus code after a crypto request
238 * "msg" has finished with the reply message "reply".
239 * It is called from tasklet context.
240 * @ap_dev: pointer to the AP device
241 * @msg: pointer to the AP message
242 * @reply: pointer to the AP reply message
243 */
244static void zcrypt_pcica_receive(struct ap_device *ap_dev,
245 struct ap_message *msg,
246 struct ap_message *reply)
247{
248 static struct error_hdr error_reply = {
249 .type = TYPE82_RSP_CODE,
250 .reply_code = REP82_ERROR_MACHINE_FAILURE,
251 };
252 struct type84_hdr *t84h = reply->message;
253 int length;
254
255 /* Copy the reply message to the request message buffer. */
256 if (IS_ERR(reply))
257 memcpy(msg->message, &error_reply, sizeof(error_reply));
258 else if (t84h->code == TYPE84_RSP_CODE) {
259 length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len);
260 memcpy(msg->message, reply->message, length);
261 } else
262 memcpy(msg->message, reply->message, sizeof error_reply);
263 complete((struct completion *) msg->private);
264}
265
266static atomic_t zcrypt_step = ATOMIC_INIT(0);
267
268/**
269 * The request distributor calls this function if it picked the PCICA
270 * device to handle a modexpo request.
271 * @zdev: pointer to zcrypt_device structure that identifies the
272 * PCICA device to the request distributor
273 * @mex: pointer to the modexpo request buffer
274 */
275static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
276 struct ica_rsa_modexpo *mex)
277{
278 struct ap_message ap_msg;
279 struct completion work;
280 int rc;
281
282 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
283 if (!ap_msg.message)
284 return -ENOMEM;
285 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
286 atomic_inc_return(&zcrypt_step);
287 ap_msg.private = &work;
288 rc = ICAMEX_msg_to_type4MEX_msg(zdev, &ap_msg, mex);
289 if (rc)
290 goto out_free;
291 init_completion(&work);
292 ap_queue_message(zdev->ap_dev, &ap_msg);
293 rc = wait_for_completion_interruptible_timeout(
294 &work, PCICA_CLEANUP_TIME);
295 if (rc > 0)
296 rc = convert_response(zdev, &ap_msg, mex->outputdata,
297 mex->outputdatalength);
298 else {
299 /* Signal pending or message timed out. */
300 ap_cancel_message(zdev->ap_dev, &ap_msg);
301 if (rc == 0)
302 /* Message timed out. */
303 rc = -ETIME;
304 }
305out_free:
306 kfree(ap_msg.message);
307 return rc;
308}
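The psmid built above tags each queued request: the pid of the calling process goes into the upper 32 bits and a module-wide counter (zcrypt_step) into the lower 32 bits, so a reply can be matched to its request even when one process has several in flight. A standalone sketch of that composition (user-space C; make_psmid is an illustrative helper, not a driver function):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: pid in the upper half, running step counter in the
     * lower half, as ap_msg.psmid is assembled in the driver. */
    static uint64_t make_psmid(uint32_t pid, uint32_t step)
    {
            return ((uint64_t)pid << 32) + step;
    }

    int main(void)
    {
            printf("psmid = %016llx\n",
                   (unsigned long long)make_psmid(4711, 1));
            return 0;
    }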
309
310/**
311 * The request distributor calls this function if it picked the PCICA
312 * device to handle a modexpo_crt request.
313 * @zdev: pointer to zcrypt_device structure that identifies the
314 * PCICA device to the request distributor
315 * @crt: pointer to the modexpo_crt request buffer
316 */
317static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
318 struct ica_rsa_modexpo_crt *crt)
319{
320 struct ap_message ap_msg;
321 struct completion work;
322 int rc;
323
324 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
325 if (!ap_msg.message)
326 return -ENOMEM;
327 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
328 atomic_inc_return(&zcrypt_step);
329 ap_msg.private = &work;
330 rc = ICACRT_msg_to_type4CRT_msg(zdev, &ap_msg, crt);
331 if (rc)
332 goto out_free;
333 init_completion(&work);
334 ap_queue_message(zdev->ap_dev, &ap_msg);
335 rc = wait_for_completion_interruptible_timeout(
336 &work, PCICA_CLEANUP_TIME);
337 if (rc > 0)
338 rc = convert_response(zdev, &ap_msg, crt->outputdata,
339 crt->outputdatalength);
340 else {
341 /* Signal pending or message timed out. */
342 ap_cancel_message(zdev->ap_dev, &ap_msg);
343 if (rc == 0)
344 /* Message timed out. */
345 rc = -ETIME;
346 }
347out_free:
348 kfree(ap_msg.message);
349 return rc;
350}
351
352/**
353 * The crypto operations for a PCICA card.
354 */
355static struct zcrypt_ops zcrypt_pcica_ops = {
356 .rsa_modexpo = zcrypt_pcica_modexpo,
357 .rsa_modexpo_crt = zcrypt_pcica_modexpo_crt,
358};
359
360/**
361 * Probe function for PCICA cards. It always accepts the AP device
362 * since the bus_match already checked the hardware type.
363 * @ap_dev: pointer to the AP device.
364 */
365static int zcrypt_pcica_probe(struct ap_device *ap_dev)
366{
367 struct zcrypt_device *zdev;
368 int rc;
369
370 zdev = zcrypt_device_alloc(PCICA_MAX_RESPONSE_SIZE);
371 if (!zdev)
372 return -ENOMEM;
373 zdev->ap_dev = ap_dev;
374 zdev->ops = &zcrypt_pcica_ops;
375 zdev->online = 1;
376 zdev->user_space_type = ZCRYPT_PCICA;
377 zdev->type_string = "PCICA";
378 zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
379 zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
380 zdev->speed_rating = PCICA_SPEED_RATING;
381 ap_dev->reply = &zdev->reply;
382 ap_dev->private = zdev;
383 rc = zcrypt_device_register(zdev);
384 if (rc)
385 goto out_free;
386 return 0;
387
388out_free:
389 ap_dev->private = NULL;
390 zcrypt_device_free(zdev);
391 return rc;
392}
393
394/**
395 * This is called to remove the extended PCICA driver information
396 * if an AP device is removed.
397 */
398static void zcrypt_pcica_remove(struct ap_device *ap_dev)
399{
400 struct zcrypt_device *zdev = ap_dev->private;
401
402 zcrypt_device_unregister(zdev);
403}
404
405int __init zcrypt_pcica_init(void)
406{
407 return ap_driver_register(&zcrypt_pcica_driver, THIS_MODULE, "pcica");
408}
409
410void zcrypt_pcica_exit(void)
411{
412 ap_driver_unregister(&zcrypt_pcica_driver);
413}
414
415#ifndef CONFIG_ZCRYPT_MONOLITHIC
416module_init(zcrypt_pcica_init);
417module_exit(zcrypt_pcica_exit);
418#endif
diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h
new file mode 100644
index 000000000000..3be11187f6df
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcica.h
@@ -0,0 +1,117 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcica.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCICA_H_
29#define _ZCRYPT_PCICA_H_
30
31/**
32 * The type 4 message family is associated with a PCICA card.
33 *
34 * The four members of the family are described below.
35 *
36 * Note that all unsigned char arrays are right-justified and left-padded
37 * with zeroes.
38 *
39 * Note that all reserved fields must be zeroes.
40 */
41struct type4_hdr {
42 unsigned char reserved1;
43 unsigned char msg_type_code; /* 0x04 */
44 unsigned short msg_len;
45 unsigned char request_code; /* 0x40 */
46 unsigned char msg_fmt;
47 unsigned short reserved2;
48} __attribute__((packed));
49
50#define TYPE4_TYPE_CODE 0x04
51#define TYPE4_REQU_CODE 0x40
52
53#define TYPE4_SME_FMT 0x00
54#define TYPE4_LME_FMT 0x10
55#define TYPE4_SCR_FMT 0x40
56#define TYPE4_LCR_FMT 0x50
57
58/* Mod-Exp, with a small modulus */
59struct type4_sme {
60 struct type4_hdr header;
61 unsigned char message[128];
62 unsigned char exponent[128];
63 unsigned char modulus[128];
64} __attribute__((packed));
65
66/* Mod-Exp, with a large modulus */
67struct type4_lme {
68 struct type4_hdr header;
69 unsigned char message[256];
70 unsigned char exponent[256];
71 unsigned char modulus[256];
72} __attribute__((packed));
73
74/* CRT, with a small modulus */
75struct type4_scr {
76 struct type4_hdr header;
77 unsigned char message[128];
78 unsigned char dp[72];
79 unsigned char dq[64];
80 unsigned char p[72];
81 unsigned char q[64];
82 unsigned char u[72];
83} __attribute__((packed));
84
85/* CRT, with a large modulus */
86struct type4_lcr {
87 struct type4_hdr header;
88 unsigned char message[256];
89 unsigned char dp[136];
90 unsigned char dq[128];
91 unsigned char p[136];
92 unsigned char q[128];
93 unsigned char u[136];
94} __attribute__((packed));
95
96/**
97 * The type 84 response family is associated with a PCICA card.
98 *
99 * Note that all unsigned char arrays are right-justified and left-padded
100 * with zeroes.
101 *
102 * Note that all reserved fields must be zeroes.
103 */
104
105struct type84_hdr {
106 unsigned char reserved1;
107 unsigned char code;
108 unsigned short len;
109 unsigned char reserved2[4];
110} __attribute__((packed));
111
112#define TYPE84_RSP_CODE 0x84
113
114int zcrypt_pcica_init(void);
115void zcrypt_pcica_exit(void);
116
117#endif /* _ZCRYPT_PCICA_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
new file mode 100644
index 000000000000..f295a403b29a
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -0,0 +1,630 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcicc.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_pcicc.h"
39#include "zcrypt_cca_key.h"
40
41#define PCICC_MIN_MOD_SIZE 64 /* 512 bits */
42#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
43#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
44
45/**
46 * PCICC cards need a speed rating of 0. This keeps them at the end of
47 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
48 * used if no other cards are present because they are slow and can only
49 * cope with PKCS12 padded requests. The logic is peculiar: PKCS11 padded
50 * requests are rejected. The modexpo function encrypts PKCS12 padded data
51 * and decrypts any non-PKCS12 padded data (except PKCS11) on the assumption
52 * that it is encrypted PKCS12 data. The modexpo_crt function always decrypts
53 * the data on the assumption that it is PKCS12 encrypted data.
54 */
55#define PCICC_SPEED_RATING 0
56
57#define PCICC_MAX_MESSAGE_SIZE 0x710 /* max size type6 v1 crt message */
58#define PCICC_MAX_RESPONSE_SIZE 0x710 /* max size type86 v1 reply */
59
60#define PCICC_CLEANUP_TIME (15*HZ)
61
62static struct ap_device_id zcrypt_pcicc_ids[] = {
63 { AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
64 { /* end of list */ },
65};
66
67#ifndef CONFIG_ZCRYPT_MONOLITHIC
68MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
69MODULE_AUTHOR("IBM Corporation");
70MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
71 "Copyright 2001, 2006 IBM Corporation");
72MODULE_LICENSE("GPL");
73#endif
74
75static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
76static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
77static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
78 struct ap_message *);
79
80static struct ap_driver zcrypt_pcicc_driver = {
81 .probe = zcrypt_pcicc_probe,
82 .remove = zcrypt_pcicc_remove,
83 .receive = zcrypt_pcicc_receive,
84 .ids = zcrypt_pcicc_ids,
85};
86
87/**
88 * The following is used to initialize the CPRB passed to the PCICC card
89 * in a type6 message. The 3 fields that must be filled in at execution
90 * time are req_parml, rpl_parml and usage_domain. Note that all three
91 * fields are *little*-endian. Actually, everything about this interface
92 * is ascii/little-endian, since the device has 'Intel inside'.
93 *
94 * The CPRB is followed immediately by the parm block.
95 * The parm block contains:
96 * - function code ('PD' 0x5044 or 'PK' 0x504B)
97 * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD')
98 * - VUD block
99 */
100static struct CPRB static_cprb = {
101 .cprb_len = __constant_cpu_to_le16(0x0070),
102 .cprb_ver_id = 0x41,
103 .func_id = {0x54,0x32},
104 .checkpoint_flag= 0x01,
105 .svr_namel = __constant_cpu_to_le16(0x0008),
106 .svr_name = {'I','C','S','F',' ',' ',' ',' '}
107};
108
109/**
110 * Check the message for PKCS11 padding.
111 */
112static inline int is_PKCS11_padded(unsigned char *buffer, int length)
113{
114 int i;
115 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
116 return 0;
117 for (i = 2; i < length; i++)
118 if (buffer[i] != 0xFF)
119 break;
120 if (i < 10 || i == length)
121 return 0;
122 if (buffer[i] != 0x00)
123 return 0;
124 return 1;
125}
126
127/**
128 * Check the message for PKCS12 padding.
129 */
130static inline int is_PKCS12_padded(unsigned char *buffer, int length)
131{
132 int i;
133 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
134 return 0;
135 for (i = 2; i < length; i++)
136 if (buffer[i] == 0x00)
137 break;
138 if ((i < 10) || (i == length))
139 return 0;
140 if (buffer[i] != 0x00)
141 return 0;
142 return 1;
143}
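Both helpers above look for a PKCS#1 block: a 0x00 0x01 header with 0xFF filler for what the driver calls PKCS11 padding, and a 0x00 0x02 header with non-zero filler, terminated by a 0x00 separator at least eight filler bytes in, for PKCS12 padding. A standalone sketch of the block-type-2 check (user-space C; looks_pkcs12_padded is an illustrative copy, not the driver function):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: a user-space copy of the block-type-2 test above. */
    static int looks_pkcs12_padded(const unsigned char *buf, int len)
    {
            int i;

            if (buf[0] != 0x00 || buf[1] != 0x02)
                    return 0;
            for (i = 2; i < len; i++)
                    if (buf[i] == 0x00)
                            break;
            return (i >= 10 && i < len);
    }

    int main(void)
    {
            unsigned char buf[32];

            /* 0x00 0x02, eight non-zero filler bytes, 0x00 separator, payload */
            memset(buf, 0xA5, sizeof(buf));
            buf[0] = 0x00;
            buf[1] = 0x02;
            buf[10] = 0x00;
            printf("padded: %d\n", looks_pkcs12_padded(buf, sizeof(buf)));
            return 0;
    }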
144
145/**
146 * Convert an ICAMEX message to a type6 MEX message.
147 *
148 * @zdev: crypto device pointer
149 * @ap_msg: pointer to AP message
150 * @mex: pointer to user input data
151 *
152 * Returns 0 on success or -EFAULT.
153 */
154static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
155 struct ap_message *ap_msg,
156 struct ica_rsa_modexpo *mex)
157{
158 static struct type6_hdr static_type6_hdr = {
159 .type = 0x06,
160 .offset1 = 0x00000058,
161 .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
162 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
163 .function_code = {'P','K'},
164 };
165 static struct function_and_rules_block static_pke_function_and_rules ={
166 .function_code = {'P','K'},
167 .ulen = __constant_cpu_to_le16(10),
168 .only_rule = {'P','K','C','S','-','1','.','2'}
169 };
170 struct {
171 struct type6_hdr hdr;
172 struct CPRB cprb;
173 struct function_and_rules_block fr;
174 unsigned short length;
175 char text[0];
176 } __attribute__((packed)) *msg = ap_msg->message;
177 int vud_len, pad_len, size;
178
179 /* VUD.ciphertext */
180 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
181 return -EFAULT;
182
183 if (is_PKCS11_padded(msg->text, mex->inputdatalength))
184 return -EINVAL;
185
186 /* static message header and f&r */
187 msg->hdr = static_type6_hdr;
188 msg->fr = static_pke_function_and_rules;
189
190 if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
191 /* strip the padding and adjust the data length */
192 pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
193 if (pad_len <= 9 || pad_len >= mex->inputdatalength)
194 return -ENODEV;
195 vud_len = mex->inputdatalength - pad_len;
196 memmove(msg->text, msg->text + pad_len, vud_len);
197 msg->length = cpu_to_le16(vud_len + 2);
198
199 /* Set up key after the variable length text. */
200 size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
201 if (size < 0)
202 return size;
203 size += sizeof(*msg) + vud_len; /* total size of msg */
204 } else {
205 vud_len = mex->inputdatalength;
206 msg->length = cpu_to_le16(2 + vud_len);
207
208 msg->hdr.function_code[1] = 'D';
209 msg->fr.function_code[1] = 'D';
210
211 /* Set up key after the variable length text. */
212 size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
213 if (size < 0)
214 return size;
215 size += sizeof(*msg) + vud_len; /* total size of msg */
216 }
217
218 /* message header, cprb and f&r */
219 msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
220 msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
221
222 msg->cprb = static_cprb;
223 msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid);
224 msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
225 sizeof(msg->cprb));
226 msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);
227
228 ap_msg->length = (size + 3) & -4;
229 return 0;
230}
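ToCardLen1 and ap_msg->length above are rounded with the (x + 3) & -4 idiom, which takes a byte count up to the next multiple of four. A standalone illustration (user-space C; round_up4 is an illustrative name):

    #include <stdio.h>

    /* Illustrative only: the "(x + 3) & -4" rounding used for ToCardLen1
     * and ap_msg->length. */
    static int round_up4(int x)
    {
            return (x + 3) & -4;
    }

    int main(void)
    {
            printf("%d %d %d\n", round_up4(112), round_up4(113), round_up4(115));
            /* prints: 112 116 116 */
            return 0;
    }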
231
232/**
233 * Convert an ICACRT message to a type6 CRT message.
234 *
235 * @zdev: crypto device pointer
236 * @ap_msg: pointer to AP message
237 * @crt: pointer to user input data
238 *
239 * Returns 0 on success or -EFAULT.
240 */
241static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev,
242 struct ap_message *ap_msg,
243 struct ica_rsa_modexpo_crt *crt)
244{
245 static struct type6_hdr static_type6_hdr = {
246 .type = 0x06,
247 .offset1 = 0x00000058,
248 .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
249 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
250 .function_code = {'P','D'},
251 };
252 static struct function_and_rules_block static_pkd_function_and_rules ={
253 .function_code = {'P','D'},
254 .ulen = __constant_cpu_to_le16(10),
255 .only_rule = {'P','K','C','S','-','1','.','2'}
256 };
257 struct {
258 struct type6_hdr hdr;
259 struct CPRB cprb;
260 struct function_and_rules_block fr;
261 unsigned short length;
262 char text[0];
263 } __attribute__((packed)) *msg = ap_msg->message;
264 int size;
265
266 /* VUD.ciphertext */
267 msg->length = cpu_to_le16(2 + crt->inputdatalength);
268 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
269 return -EFAULT;
270
271 if (is_PKCS11_padded(msg->text, crt->inputdatalength))
272 return -EINVAL;
273
274 /* Set up key after the variable length text. */
275 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0);
276 if (size < 0)
277 return size;
278 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
279
280 /* message header, cprb and f&r */
281 msg->hdr = static_type6_hdr;
282 msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
283 msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
284
285 msg->cprb = static_cprb;
286 msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
287 msg->cprb.req_parml = msg->cprb.rpl_parml =
288 cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb));
289
290 msg->fr = static_pkd_function_and_rules;
291
292 ap_msg->length = (size + 3) & -4;
293 return 0;
294}
295
296/**
297 * Copy results from a type 86 reply message back to user space.
298 *
299 * @zdev: crypto device pointer
300 * @reply: reply AP message.
301 * @data: pointer to user output data
302 * @length: size of user output data
303 *
304 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
305 */
306struct type86_reply {
307 struct type86_hdr hdr;
308 struct type86_fmt2_ext fmt2;
309 struct CPRB cprb;
310 unsigned char pad[4]; /* 4 byte function code/rules block ? */
311 unsigned short length;
312 char text[0];
313} __attribute__((packed));
314
315static int convert_type86(struct zcrypt_device *zdev,
316 struct ap_message *reply,
317 char __user *outputdata,
318 unsigned int outputdatalength)
319{
320 static unsigned char static_pad[] = {
321 0x00,0x02,
322 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
323 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
324 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
325 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
326 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
327 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
328 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
329 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
330 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
331 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
332 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
333 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
334 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
335 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
336 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
337 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
338 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
339 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
340 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
341 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
342 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
343 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
344 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
345 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
346 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
347 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
348 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
349 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
350 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
351 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
352 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
353 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
354 };
355 struct type86_reply *msg = reply->message;
356 unsigned short service_rc, service_rs;
357 unsigned int reply_len, pad_len;
358 char *data;
359
360 service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
361 if (unlikely(service_rc != 0)) {
362 service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
363 if (service_rc == 8 && service_rs == 66) {
364 PDEBUG("Bad block format on PCICC\n");
365 return -EINVAL;
366 }
367 if (service_rc == 8 && service_rs == 65) {
368 PDEBUG("Probably an even modulus on PCICC\n");
369 return -EINVAL;
370 }
371 if (service_rc == 8 && service_rs == 770) {
372 PDEBUG("Invalid key length on PCICC\n");
373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
374 return -EAGAIN;
375 }
376 if (service_rc == 8 && service_rs == 783) {
377 PDEBUG("Extended bitlengths not enabled on PCICC\n");
378 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
379 return -EAGAIN;
380 }
381 PRINTK("Unknown service rc/rs (PCICC): %d/%d\n",
382 service_rc, service_rs);
383 zdev->online = 0;
384 return -EAGAIN; /* repeat the request on a different device. */
385 }
386 data = msg->text;
387 reply_len = le16_to_cpu(msg->length) - 2;
388 if (reply_len > outputdatalength)
389 return -EINVAL;
390 /**
391 * For all encipher requests, the length of the ciphertext (reply_len)
392 * will always equal the modulus length. For MEX decipher requests
393	 * the output needs to be padded. The minimum pad size is 10.
394	 *
395	 * Currently, the cases where padding is added are:
396 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
397 * ZERO-PAD and CRT is only supported for PKD requests)
398 * - PCICC, always
399 */
400 pad_len = outputdatalength - reply_len;
401 if (pad_len > 0) {
402 if (pad_len < 10)
403 return -EINVAL;
404 /* 'restore' padding left in the PCICC/PCIXCC card. */
405 if (copy_to_user(outputdata, static_pad, pad_len - 1))
406 return -EFAULT;
407 if (put_user(0, outputdata + pad_len - 1))
408 return -EFAULT;
409 }
410 /* Copy the crypto response to user space. */
411 if (copy_to_user(outputdata + pad_len, data, reply_len))
412 return -EFAULT;
413 return 0;
414}
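When pad_len is non-zero, the code above reconstructs what the card stripped: pad_len - 1 bytes of static_pad, one 0x00 separator, then reply_len bytes of payload, together filling exactly outputdatalength bytes. A standalone sketch of that layout (user-space C; the lengths are hypothetical):

    #include <stdio.h>

    /* Illustrative only: the user-space buffer layout written by
     * convert_type86() when padding has to be restored. */
    int main(void)
    {
            unsigned int outputdatalength = 128;  /* modulus length requested */
            unsigned int reply_len = 117;         /* payload returned by the card */
            unsigned int pad_len = outputdatalength - reply_len;

            /* [ pad_len-1 bytes of static_pad | 0x00 | reply_len bytes of data ] */
            printf("pad bytes 0..%u, separator at %u, data at %u..%u\n",
                   pad_len - 2, pad_len - 1, pad_len, outputdatalength - 1);
            return 0;
    }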
415
416static int convert_response(struct zcrypt_device *zdev,
417 struct ap_message *reply,
418 char __user *outputdata,
419 unsigned int outputdatalength)
420{
421 struct type86_reply *msg = reply->message;
422
423 /* Response type byte is the second byte in the response. */
424 switch (msg->hdr.type) {
425 case TYPE82_RSP_CODE:
426 case TYPE88_RSP_CODE:
427 return convert_error(zdev, reply);
428 case TYPE86_RSP_CODE:
429 if (msg->hdr.reply_code)
430 return convert_error(zdev, reply);
431 if (msg->cprb.cprb_ver_id == 0x01)
432 return convert_type86(zdev, reply,
433 outputdata, outputdatalength);
434 /* no break, incorrect cprb version is an unknown response */
435 default: /* Unknown response type, this should NEVER EVER happen */
436 PRINTK("Unrecognized Message Header: %08x%08x\n",
437 *(unsigned int *) reply->message,
438 *(unsigned int *) (reply->message+4));
439 zdev->online = 0;
440 return -EAGAIN; /* repeat the request on a different device. */
441 }
442}
443
444/**
445 * This function is called from the AP bus code after a crypto request
446 * "msg" has finished with the reply message "reply".
447 * It is called from tasklet context.
448 * @ap_dev: pointer to the AP device
449 * @msg: pointer to the AP message
450 * @reply: pointer to the AP reply message
451 */
452static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
453 struct ap_message *msg,
454 struct ap_message *reply)
455{
456 static struct error_hdr error_reply = {
457 .type = TYPE82_RSP_CODE,
458 .reply_code = REP82_ERROR_MACHINE_FAILURE,
459 };
460 struct type86_reply *t86r = reply->message;
461 int length;
462
463 /* Copy the reply message to the request message buffer. */
464 if (IS_ERR(reply))
465 memcpy(msg->message, &error_reply, sizeof(error_reply));
466 else if (t86r->hdr.type == TYPE86_RSP_CODE &&
467 t86r->cprb.cprb_ver_id == 0x01) {
468 length = sizeof(struct type86_reply) + t86r->length - 2;
469 length = min(PCICC_MAX_RESPONSE_SIZE, length);
470 memcpy(msg->message, reply->message, length);
471 } else
472 memcpy(msg->message, reply->message, sizeof error_reply);
473 complete((struct completion *) msg->private);
474}
475
476static atomic_t zcrypt_step = ATOMIC_INIT(0);
477
478/**
479 * The request distributor calls this function if it picked the PCICC
480 * device to handle a modexpo request.
481 * @zdev: pointer to zcrypt_device structure that identifies the
482 * PCICC device to the request distributor
483 * @mex: pointer to the modexpo request buffer
484 */
485static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
486 struct ica_rsa_modexpo *mex)
487{
488 struct ap_message ap_msg;
489 struct completion work;
490 int rc;
491
492 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
493 if (!ap_msg.message)
494 return -ENOMEM;
495 ap_msg.length = PAGE_SIZE;
496 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
497 atomic_inc_return(&zcrypt_step);
498 ap_msg.private = &work;
499 rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex);
500 if (rc)
501 goto out_free;
502 init_completion(&work);
503 ap_queue_message(zdev->ap_dev, &ap_msg);
504 rc = wait_for_completion_interruptible_timeout(
505 &work, PCICC_CLEANUP_TIME);
506 if (rc > 0)
507 rc = convert_response(zdev, &ap_msg, mex->outputdata,
508 mex->outputdatalength);
509 else {
510 /* Signal pending or message timed out. */
511 ap_cancel_message(zdev->ap_dev, &ap_msg);
512 if (rc == 0)
513 /* Message timed out. */
514 rc = -ETIME;
515 }
516out_free:
517 free_page((unsigned long) ap_msg.message);
518 return rc;
519}
520
521/**
522 * The request distributor calls this function if it picked the PCICC
523 * device to handle a modexpo_crt request.
524 * @zdev: pointer to zcrypt_device structure that identifies the
525 * PCICC device to the request distributor
526 * @crt: pointer to the modexpo_crt request buffer
527 */
528static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
529 struct ica_rsa_modexpo_crt *crt)
530{
531 struct ap_message ap_msg;
532 struct completion work;
533 int rc;
534
535 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
536 if (!ap_msg.message)
537 return -ENOMEM;
538 ap_msg.length = PAGE_SIZE;
539 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
540 atomic_inc_return(&zcrypt_step);
541 ap_msg.private = &work;
542 rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
543 if (rc)
544 goto out_free;
545 init_completion(&work);
546 ap_queue_message(zdev->ap_dev, &ap_msg);
547 rc = wait_for_completion_interruptible_timeout(
548 &work, PCICC_CLEANUP_TIME);
549 if (rc > 0)
550 rc = convert_response(zdev, &ap_msg, crt->outputdata,
551 crt->outputdatalength);
552 else {
553 /* Signal pending or message timed out. */
554 ap_cancel_message(zdev->ap_dev, &ap_msg);
555 if (rc == 0)
556 /* Message timed out. */
557 rc = -ETIME;
558 }
559out_free:
560 free_page((unsigned long) ap_msg.message);
561 return rc;
562}
563
564/**
565 * The crypto operations for a PCICC card.
566 */
567static struct zcrypt_ops zcrypt_pcicc_ops = {
568 .rsa_modexpo = zcrypt_pcicc_modexpo,
569 .rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
570};
571
572/**
573 * Probe function for PCICC cards. It always accepts the AP device
574 * since the bus_match already checked the hardware type.
575 * @ap_dev: pointer to the AP device.
576 */
577static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
578{
579 struct zcrypt_device *zdev;
580 int rc;
581
582 zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE);
583 if (!zdev)
584 return -ENOMEM;
585 zdev->ap_dev = ap_dev;
586 zdev->ops = &zcrypt_pcicc_ops;
587 zdev->online = 1;
588 zdev->user_space_type = ZCRYPT_PCICC;
589 zdev->type_string = "PCICC";
590 zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
591 zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
592 zdev->speed_rating = PCICC_SPEED_RATING;
593 ap_dev->reply = &zdev->reply;
594 ap_dev->private = zdev;
595 rc = zcrypt_device_register(zdev);
596 if (rc)
597 goto out_free;
598 return 0;
599
600 out_free:
601 ap_dev->private = NULL;
602 zcrypt_device_free(zdev);
603 return rc;
604}
605
606/**
607 * This is called to remove the extended PCICC driver information
608 * if an AP device is removed.
609 */
610static void zcrypt_pcicc_remove(struct ap_device *ap_dev)
611{
612 struct zcrypt_device *zdev = ap_dev->private;
613
614 zcrypt_device_unregister(zdev);
615}
616
617int __init zcrypt_pcicc_init(void)
618{
619 return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc");
620}
621
622void zcrypt_pcicc_exit(void)
623{
624 ap_driver_unregister(&zcrypt_pcicc_driver);
625}
626
627#ifndef CONFIG_ZCRYPT_MONOLITHIC
628module_init(zcrypt_pcicc_init);
629module_exit(zcrypt_pcicc_exit);
630#endif
diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h
new file mode 100644
index 000000000000..6d4454846c8f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcicc.h
@@ -0,0 +1,176 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcicc.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCICC_H_
29#define _ZCRYPT_PCICC_H_
30
31/**
32 * The type 6 message family is associated with PCICC or PCIXCC cards.
33 *
34 * It contains a message header followed by a CPRB, both of which
35 * are described below.
36 *
37 * Note that all reserved fields must be zeroes.
38 */
39struct type6_hdr {
40 unsigned char reserved1; /* 0x00 */
41 unsigned char type; /* 0x06 */
42 unsigned char reserved2[2]; /* 0x0000 */
43 unsigned char right[4]; /* 0x00000000 */
44 unsigned char reserved3[2]; /* 0x0000 */
45 unsigned char reserved4[2]; /* 0x0000 */
46 unsigned char apfs[4]; /* 0x00000000 */
47 unsigned int offset1; /* 0x00000058 (offset to CPRB) */
48 unsigned int offset2; /* 0x00000000 */
49 unsigned int offset3; /* 0x00000000 */
50 unsigned int offset4; /* 0x00000000 */
51 unsigned char agent_id[16]; /* PCICC: */
52 /* 0x0100 */
53 /* 0x4343412d4150504c202020 */
54 /* 0x010101 */
55 /* PCIXCC: */
56 /* 0x4341000000000000 */
57 /* 0x0000000000000000 */
58 unsigned char rqid[2]; /* rqid. internal to 603 */
59 unsigned char reserved5[2]; /* 0x0000 */
60 unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
61 unsigned char reserved6[2]; /* 0x0000 */
62 unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
63 unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
64 unsigned int ToCardLen3; /* 0x00000000 */
65 unsigned int ToCardLen4; /* 0x00000000 */
66 unsigned int FromCardLen1; /* response buffer length */
67 unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
68 unsigned int FromCardLen3; /* 0x00000000 */
69 unsigned int FromCardLen4; /* 0x00000000 */
70} __attribute__((packed));
71
72/**
73 * CPRB
74 * Note that all shorts, ints and longs are little-endian.
75 * All pointer fields are 32 bits long and mean nothing (the parameter block follows the CPRB).
76 *
77 * A request CPRB is followed by a request_parameter_block.
78 *
79 * The request (or reply) parameter block is organized thus:
80 * function code
81 * VUD block
82 * key block
83 */
84struct CPRB {
85 unsigned short cprb_len; /* CPRB length */
86 unsigned char cprb_ver_id; /* CPRB version id. */
87 unsigned char pad_000; /* Alignment pad byte. */
88 unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
89 unsigned char srpi_verb; /* SRPI verb type */
90 unsigned char flags; /* flags */
91 unsigned char func_id[2]; /* function id */
92 unsigned char checkpoint_flag; /* */
93 unsigned char resv2; /* reserved */
94 unsigned short req_parml; /* request parameter buffer */
95 /* length 16-bit little endian */
96 unsigned char req_parmp[4]; /* request parameter buffer *
97 * pointer (means nothing: the *
98 * parameter buffer follows *
99 * the CPRB). */
100 unsigned char req_datal[4]; /* request data buffer */
101 /* length ULELONG */
102 unsigned char req_datap[4]; /* request data buffer */
103 /* pointer */
104 unsigned short rpl_parml; /* reply parameter buffer */
105 /* length 16-bit little endian */
106 unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
107 unsigned char rpl_parmp[4]; /* reply parameter buffer *
108 * pointer (means nothing: the *
109 * parameter buffer follows *
110 * the CPRB). */
111 unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
112 unsigned char rpl_datap[4]; /* reply data buffer */
113 /* pointer */
114 unsigned short ccp_rscode; /* server reason code ULESHORT */
115 unsigned short ccp_rtcode; /* server return code ULESHORT */
116 unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
117 unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
118 unsigned char repd_datal[4]; /* replied data length ULELONG */
119 unsigned char req_pc[2]; /* PC identifier */
120 unsigned char res_origin[8]; /* resource origin */
121 unsigned char mac_value[8]; /* Mac Value */
122 unsigned char logon_id[8]; /* Logon Identifier */
123 unsigned char usage_domain[2]; /* cdx */
124 unsigned char resv3[18]; /* reserved for requestor */
125 unsigned short svr_namel; /* server name length ULESHORT */
126 unsigned char svr_name[8]; /* server name */
127} __attribute__((packed));
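Because every short in the CPRB is little-endian while s390 is big-endian, the driver fills these fields through cpu_to_le16() (see static_cprb and the req_parml/rpl_parml assignments above). A standalone sketch of the byte order involved (user-space C; to_le16_on_be is an illustrative helper, correct on a big-endian host only):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: on a big-endian host, storing a CPRB short
     * little-endian amounts to a byte swap, which is what cpu_to_le16()
     * performs in the driver. */
    static uint16_t to_le16_on_be(uint16_t v)
    {
            return (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
            printf("cprb_len  stored as 0x%04x\n", to_le16_on_be(0x0070));
            printf("svr_namel stored as 0x%04x\n", to_le16_on_be(0x0008));
            return 0;
    }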
128
129/**
130 * The type 86 message family is associated with PCICC and PCIXCC cards.
131 *
132 * It contains a message header followed by a CPRB. The CPRB is
133 * the same as the request CPRB, which is described above.
134 *
135 * If format is 1, an error condition exists and no data beyond
136 * the 8-byte message header is of interest.
137 *
138 * The non-error message is shown below.
139 *
140 * Note that all reserved fields must be zeroes.
141 */
142struct type86_hdr {
143 unsigned char reserved1; /* 0x00 */
144 unsigned char type; /* 0x86 */
145 unsigned char format; /* 0x01 (error) or 0x02 (ok) */
146 unsigned char reserved2; /* 0x00 */
147 unsigned char reply_code; /* reply code (see above) */
148 unsigned char reserved3[3]; /* 0x000000 */
149} __attribute__((packed));
150
151#define TYPE86_RSP_CODE 0x86
152#define TYPE86_FMT2 0x02
153
154struct type86_fmt2_ext {
155 unsigned char reserved[4]; /* 0x00000000 */
156 unsigned char apfs[4]; /* final status */
157 unsigned int count1; /* length of CPRB + parameters */
158 unsigned int offset1; /* offset to CPRB */
159 unsigned int count2; /* 0x00000000 */
160 unsigned int offset2; /* db offset 0x00000000 for PKD */
161 unsigned int count3; /* 0x00000000 */
162 unsigned int offset3; /* 0x00000000 */
163 unsigned int count4; /* 0x00000000 */
164 unsigned int offset4; /* 0x00000000 */
165} __attribute__((packed));
166
167struct function_and_rules_block {
168 unsigned char function_code[2];
169 unsigned short ulen;
170 unsigned char only_rule[8];
171} __attribute__((packed));
172
173int zcrypt_pcicc_init(void);
174void zcrypt_pcicc_exit(void);
175
176#endif /* _ZCRYPT_PCICC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
new file mode 100644
index 000000000000..2da8b9381407
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -0,0 +1,951 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcixcc.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <linux/delay.h>
33#include <asm/atomic.h>
34#include <asm/uaccess.h>
35
36#include "ap_bus.h"
37#include "zcrypt_api.h"
38#include "zcrypt_error.h"
39#include "zcrypt_pcicc.h"
40#include "zcrypt_pcixcc.h"
41#include "zcrypt_cca_key.h"
42
43#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
44#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
45#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
46
47#define PCIXCC_MCL2_SPEED_RATING 7870 /* FIXME: needs finetuning */
48#define PCIXCC_MCL3_SPEED_RATING 7870
49#define CEX2C_SPEED_RATING 8540
50
51#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
52#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
53
54#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
55#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
56#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
57#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
58
59#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
60
61#define PCIXCC_CLEANUP_TIME (15*HZ)
62
63#define CEIL4(x) ((((x)+3)/4)*4)
64
65struct response_type {
66 struct completion work;
67 int type;
68};
69#define PCIXCC_RESPONSE_TYPE_ICA 0
70#define PCIXCC_RESPONSE_TYPE_XCRB 1
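The response_type above travels in ap_msg.private so that the tasklet-context receive callback knows whether the finished request was an ICA modexpo or an XCRB and, with that, which copy limit applies. A standalone sketch of this tag-based dispatch (user-space C; reply_copy_limit is an illustrative helper, not a driver function):

    #include <stdio.h>

    /* Illustrative only: picking the copy limit from the response type tag,
     * mirroring the switch in zcrypt_pcixcc_receive() further down. */
    enum resp_type { RESP_ICA = 0, RESP_XCRB = 1 };

    static int reply_copy_limit(enum resp_type type)
    {
            switch (type) {
            case RESP_ICA:
                    return 0x77c;        /* PCIXCC_MAX_ICA_RESPONSE_SIZE */
            case RESP_XCRB:
                    return 12 * 1024;    /* PCIXCC_MAX_XCRB_RESPONSE_SIZE */
            default:
                    return -1;           /* unknown internal response type */
            }
    }

    int main(void)
    {
            printf("%d %d\n", reply_copy_limit(RESP_ICA),
                   reply_copy_limit(RESP_XCRB));
            return 0;
    }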
71
72static struct ap_device_id zcrypt_pcixcc_ids[] = {
73 { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
74 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
75 { /* end of list */ },
76};
77
78#ifndef CONFIG_ZCRYPT_MONOLITHIC
79MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
80MODULE_AUTHOR("IBM Corporation");
81MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
82 "Copyright 2001, 2006 IBM Corporation");
83MODULE_LICENSE("GPL");
84#endif
85
86static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
87static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
88static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
89 struct ap_message *);
90
91static struct ap_driver zcrypt_pcixcc_driver = {
92 .probe = zcrypt_pcixcc_probe,
93 .remove = zcrypt_pcixcc_remove,
94 .receive = zcrypt_pcixcc_receive,
95 .ids = zcrypt_pcixcc_ids,
96};
97
98/**
99 * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
100 * card in a type6 message. The 3 fields that must be filled in at execution
101 * time are req_parml, rpl_parml and usage_domain.
102 * Everything about this interface is ascii/big-endian, since the
103 * device does *not* have 'Intel inside'.
104 *
105 * The CPRBX is followed immediately by the parm block.
106 * The parm block contains:
107 * - function code ('PD' 0x5044 or 'PK' 0x504B)
108 * - rule block (one of:)
109 * + 0x000A 'PKCS-1.2' (MCL2 'PD')
110 * + 0x000A 'ZERO-PAD' (MCL2 'PK')
111 * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
112 * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
113 * - VUD block
114 */
115static struct CPRBX static_cprbx = {
116 .cprb_len = 0x00DC,
117 .cprb_ver_id = 0x02,
118 .func_id = {0x54,0x32},
119};
120
121/**
122 * Convert an ICAMEX message to a type6 MEX message.
123 *
124 * @zdev: crypto device pointer
125 * @ap_msg: pointer to AP message
126 * @mex: pointer to user input data
127 *
128 * Returns 0 on success or -EFAULT.
129 */
130static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
131 struct ap_message *ap_msg,
132 struct ica_rsa_modexpo *mex)
133{
134 static struct type6_hdr static_type6_hdrX = {
135 .type = 0x06,
136 .offset1 = 0x00000058,
137 .agent_id = {'C','A',},
138 .function_code = {'P','K'},
139 };
140 static struct function_and_rules_block static_pke_fnr = {
141 .function_code = {'P','K'},
142 .ulen = 10,
143 .only_rule = {'M','R','P',' ',' ',' ',' ',' '}
144 };
145 static struct function_and_rules_block static_pke_fnr_MCL2 = {
146 .function_code = {'P','K'},
147 .ulen = 10,
148 .only_rule = {'Z','E','R','O','-','P','A','D'}
149 };
150 struct {
151 struct type6_hdr hdr;
152 struct CPRBX cprbx;
153 struct function_and_rules_block fr;
154 unsigned short length;
155 char text[0];
156 } __attribute__((packed)) *msg = ap_msg->message;
157 int size;
158
159 /* VUD.ciphertext */
160 msg->length = mex->inputdatalength + 2;
161 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
162 return -EFAULT;
163
164 /* Set up key which is located after the variable length text. */
165 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
166 if (size < 0)
167 return size;
168 size += sizeof(*msg) + mex->inputdatalength;
169
170 /* message header, cprbx and f&r */
171 msg->hdr = static_type6_hdrX;
172 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
173 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
174
175 msg->cprbx = static_cprbx;
176 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
177 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
178
179 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
180 static_pke_fnr_MCL2 : static_pke_fnr;
181
182 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
183
184 ap_msg->length = size;
185 return 0;
186}
187
188/**
189 * Convert an ICACRT message to a type6 CRT message.
190 *
191 * @zdev: crypto device pointer
192 * @ap_msg: pointer to AP message
193 * @crt: pointer to user input data
194 *
195 * Returns 0 on success or -EFAULT.
196 */
197static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
198 struct ap_message *ap_msg,
199 struct ica_rsa_modexpo_crt *crt)
200{
201 static struct type6_hdr static_type6_hdrX = {
202 .type = 0x06,
203 .offset1 = 0x00000058,
204 .agent_id = {'C','A',},
205 .function_code = {'P','D'},
206 };
207 static struct function_and_rules_block static_pkd_fnr = {
208 .function_code = {'P','D'},
209 .ulen = 10,
210 .only_rule = {'Z','E','R','O','-','P','A','D'}
211 };
212
213 static struct function_and_rules_block static_pkd_fnr_MCL2 = {
214 .function_code = {'P','D'},
215 .ulen = 10,
216 .only_rule = {'P','K','C','S','-','1','.','2'}
217 };
218 struct {
219 struct type6_hdr hdr;
220 struct CPRBX cprbx;
221 struct function_and_rules_block fr;
222 unsigned short length;
223 char text[0];
224 } __attribute__((packed)) *msg = ap_msg->message;
225 int size;
226
227 /* VUD.ciphertext */
228 msg->length = crt->inputdatalength + 2;
229 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
230 return -EFAULT;
231
232 /* Set up key which is located after the variable length text. */
233 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
234 if (size < 0)
235 return size;
236 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
237
238 /* message header, cprbx and f&r */
239 msg->hdr = static_type6_hdrX;
240 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
241 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
242
243 msg->cprbx = static_cprbx;
244 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
245 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
246 size - sizeof(msg->hdr) - sizeof(msg->cprbx);
247
248 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
249 static_pkd_fnr_MCL2 : static_pkd_fnr;
250
251 ap_msg->length = size;
252 return 0;
253}
254
255/**
256 * Convert an XCRB message to a type6 CPRB message.
257 *
258 * @zdev: crypto device pointer
259 * @ap_msg: pointer to AP message
260 * @xcRB: pointer to user input data
261 *
262 * Returns 0 on success or -EFAULT.
263 */
264struct type86_fmt2_msg {
265 struct type86_hdr hdr;
266 struct type86_fmt2_ext fmt2;
267} __attribute__((packed));
268
269static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
270 struct ap_message *ap_msg,
271 struct ica_xcRB *xcRB)
272{
273 static struct type6_hdr static_type6_hdrX = {
274 .type = 0x06,
275 .offset1 = 0x00000058,
276 };
277 struct {
278 struct type6_hdr hdr;
279 struct ica_CPRBX cprbx;
280 } __attribute__((packed)) *msg = ap_msg->message;
281
282 int rcblen = CEIL4(xcRB->request_control_blk_length);
283 int replylen;
284 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
285 char *function_code;
286
287 /* length checks */
288 ap_msg->length = sizeof(struct type6_hdr) +
289 CEIL4(xcRB->request_control_blk_length) +
290 xcRB->request_data_length;
291 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) {
292 PRINTK("Combined message is too large (%ld/%d/%d).\n",
293 sizeof(struct type6_hdr),
294 xcRB->request_control_blk_length,
295 xcRB->request_data_length);
296 return -EFAULT;
297 }
298 if (CEIL4(xcRB->reply_control_blk_length) >
299 PCIXCC_MAX_XCRB_REPLY_SIZE) {
300 PDEBUG("Reply CPRB length is too large (%d).\n",
301 xcRB->request_control_blk_length);
302 return -EFAULT;
303 }
304 if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) {
305 PDEBUG("Reply data block length is too large (%d).\n",
306 xcRB->reply_data_length);
307 return -EFAULT;
308 }
309 replylen = CEIL4(xcRB->reply_control_blk_length) +
310 CEIL4(xcRB->reply_data_length) +
311 sizeof(struct type86_fmt2_msg);
312 if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
313 PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE"
314 " (%d/%d/%d).\n",
315 sizeof(struct type86_fmt2_msg),
316 xcRB->reply_control_blk_length,
317 xcRB->reply_data_length);
318 xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
319 (sizeof(struct type86_fmt2_msg) +
320 CEIL4(xcRB->reply_data_length));
321 PDEBUG("Capping Reply CPRB length at %d\n",
322 xcRB->reply_control_blk_length);
323 }
324
325 /* prepare type6 header */
326 msg->hdr = static_type6_hdrX;
327 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
328 msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
329 if (xcRB->request_data_length) {
330 msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
331 msg->hdr.ToCardLen2 = xcRB->request_data_length;
332 }
333 msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
334 msg->hdr.FromCardLen2 = xcRB->reply_data_length;
335
336 /* prepare CPRB */
337 if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
338 xcRB->request_control_blk_length))
339 return -EFAULT;
340 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
341 xcRB->request_control_blk_length) {
342 PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len,
343 xcRB->request_control_blk_length);
344 return -EFAULT;
345 }
346 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
347 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
348
349 /* copy data block */
350 if (xcRB->request_data_length &&
351 copy_from_user(req_data, xcRB->request_data_address,
352 xcRB->request_data_length))
353 return -EFAULT;
354 return 0;
355}
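The request assembled above is laid out as a type6 header, the user's control block rounded up with CEIL4, and then the data block; the total must not exceed PCIXCC_MAX_XCRB_MESSAGE_SIZE. A standalone sketch of that length calculation (user-space C; the block lengths are hypothetical):

    #include <stdio.h>

    #define CEIL4(x) ((((x) + 3) / 4) * 4)
    #define MAX_XCRB_MESSAGE_SIZE (12 * 1024)   /* PCIXCC_MAX_XCRB_MESSAGE_SIZE */

    /* Illustrative only: offsets and total length of an XCRB request as
     * built by XCRB_msg_to_type6CPRB_msgX(). */
    int main(void)
    {
            unsigned int hdr_len  = 0x58;   /* sizeof(struct type6_hdr) */
            unsigned int cprb_len = 0x123;  /* hypothetical control block length */
            unsigned int data_len = 2048;   /* hypothetical data block length */
            unsigned int total = hdr_len + CEIL4(cprb_len) + data_len;

            printf("data at offset %u, total %u of at most %u bytes\n",
                   hdr_len + CEIL4(cprb_len), total, MAX_XCRB_MESSAGE_SIZE);
            return 0;
    }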
356
357/**
358 * Copy results from a type 86 ICA reply message back to user space.
359 *
360 * @zdev: crypto device pointer
361 * @reply: reply AP message.
362 * @data: pointer to user output data
363 * @length: size of user output data
364 *
365 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
366 */
367struct type86x_reply {
368 struct type86_hdr hdr;
369 struct type86_fmt2_ext fmt2;
370 struct CPRBX cprbx;
371 unsigned char pad[4]; /* 4 byte function code/rules block ? */
372 unsigned short length;
373 char text[0];
374} __attribute__((packed));
375
376static int convert_type86_ica(struct zcrypt_device *zdev,
377 struct ap_message *reply,
378 char __user *outputdata,
379 unsigned int outputdatalength)
380{
381 static unsigned char static_pad[] = {
382 0x00,0x02,
383 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
384 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
385 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
386 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
387 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
388 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
389 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
390 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
391 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
392 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
393 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
394 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
395 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
396 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
397 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
398 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
399 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
400 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
401 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
402 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
403 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
404 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
405 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
406 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
407 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
408 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
409 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
410 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
411 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
412 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
413 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
414 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
415 };
416 struct type86x_reply *msg = reply->message;
417 unsigned short service_rc, service_rs;
418 unsigned int reply_len, pad_len;
419 char *data;
420
421 service_rc = msg->cprbx.ccp_rtcode;
422 if (unlikely(service_rc != 0)) {
423 service_rs = msg->cprbx.ccp_rscode;
424 if (service_rc == 8 && service_rs == 66) {
425 PDEBUG("Bad block format on PCIXCC/CEX2C\n");
426 return -EINVAL;
427 }
428 if (service_rc == 8 && service_rs == 65) {
429 PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n");
430 return -EINVAL;
431 }
432 if (service_rc == 8 && service_rs == 770) {
433 PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
434 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
435 return -EAGAIN;
436 }
437 if (service_rc == 8 && service_rs == 783) {
438 PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
439 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
440 return -EAGAIN;
441 }
442 PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
443 service_rc, service_rs);
444 zdev->online = 0;
445 return -EAGAIN; /* repeat the request on a different device. */
446 }
447 data = msg->text;
448 reply_len = msg->length - 2;
449 if (reply_len > outputdatalength)
450 return -EINVAL;
451 /**
452 * For all encipher requests, the length of the ciphertext (reply_len)
453 * will always equal the modulus length. For MEX decipher requests
454	 * the output needs to be padded. The minimum pad size is 10.
455	 *
456	 * Currently, the cases where padding is added are:
457 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
458 * ZERO-PAD and CRT is only supported for PKD requests)
459 * - PCICC, always
460 */
461 pad_len = outputdatalength - reply_len;
462 if (pad_len > 0) {
463 if (pad_len < 10)
464 return -EINVAL;
465 /* 'restore' padding left in the PCICC/PCIXCC card. */
466 if (copy_to_user(outputdata, static_pad, pad_len - 1))
467 return -EFAULT;
468 if (put_user(0, outputdata + pad_len - 1))
469 return -EFAULT;
470 }
471 /* Copy the crypto response to user space. */
472 if (copy_to_user(outputdata + pad_len, data, reply_len))
473 return -EFAULT;
474 return 0;
475}
476
477/**
478 * Copy results from a type 86 XCRB reply message back to user space.
479 *
480 * @zdev: crypto device pointer
481 * @reply: reply AP message.
482 * @xcRB: pointer to XCRB
483 *
484 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
485 */
486static int convert_type86_xcrb(struct zcrypt_device *zdev,
487 struct ap_message *reply,
488 struct ica_xcRB *xcRB)
489{
490 struct type86_fmt2_msg *msg = reply->message;
491 char *data = reply->message;
492
493 /* Copy CPRB to user */
494 if (copy_to_user(xcRB->reply_control_blk_addr,
495 data + msg->fmt2.offset1, msg->fmt2.count1))
496 return -EFAULT;
497 xcRB->reply_control_blk_length = msg->fmt2.count1;
498
499 /* Copy data buffer to user */
500 if (msg->fmt2.count2)
501 if (copy_to_user(xcRB->reply_data_addr,
502 data + msg->fmt2.offset2, msg->fmt2.count2))
503 return -EFAULT;
504 xcRB->reply_data_length = msg->fmt2.count2;
505 return 0;
506}
507
508static int convert_response_ica(struct zcrypt_device *zdev,
509 struct ap_message *reply,
510 char __user *outputdata,
511 unsigned int outputdatalength)
512{
513 struct type86x_reply *msg = reply->message;
514
515 /* Response type byte is the second byte in the response. */
516 switch (((unsigned char *) reply->message)[1]) {
517 case TYPE82_RSP_CODE:
518 case TYPE88_RSP_CODE:
519 return convert_error(zdev, reply);
520 case TYPE86_RSP_CODE:
521 if (msg->hdr.reply_code)
522 return convert_error(zdev, reply);
523 if (msg->cprbx.cprb_ver_id == 0x02)
524 return convert_type86_ica(zdev, reply,
525 outputdata, outputdatalength);
526 /* no break, incorrect cprb version is an unknown response */
527 default: /* Unknown response type, this should NEVER EVER happen */
528 PRINTK("Unrecognized Message Header: %08x%08x\n",
529 *(unsigned int *) reply->message,
530 *(unsigned int *) (reply->message+4));
531 zdev->online = 0;
532 return -EAGAIN; /* repeat the request on a different device. */
533 }
534}
535
536static int convert_response_xcrb(struct zcrypt_device *zdev,
537 struct ap_message *reply,
538 struct ica_xcRB *xcRB)
539{
540 struct type86x_reply *msg = reply->message;
541
542 /* Response type byte is the second byte in the response. */
543 switch (((unsigned char *) reply->message)[1]) {
544 case TYPE82_RSP_CODE:
545 case TYPE88_RSP_CODE:
546 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
547 return convert_error(zdev, reply);
548 case TYPE86_RSP_CODE:
549 if (msg->hdr.reply_code) {
550 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
551 return convert_error(zdev, reply);
552 }
553 if (msg->cprbx.cprb_ver_id == 0x02)
554 return convert_type86_xcrb(zdev, reply, xcRB);
555 /* no break, incorrect cprb version is an unknown response */
556 default: /* Unknown response type, this should NEVER EVER happen */
557 PRINTK("Unrecognized Message Header: %08x%08x\n",
558 *(unsigned int *) reply->message,
559 *(unsigned int *) (reply->message+4));
560 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
561 zdev->online = 0;
562 return -EAGAIN; /* repeat the request on a different device. */
563 }
564}
565
566/**
567 * This function is called from the AP bus code after a crypto request
568 * "msg" has finished with the reply message "reply".
569 * It is called from tasklet context.
570 * @ap_dev: pointer to the AP device
571 * @msg: pointer to the AP message
572 * @reply: pointer to the AP reply message
573 */
574static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
575 struct ap_message *msg,
576 struct ap_message *reply)
577{
578 static struct error_hdr error_reply = {
579 .type = TYPE82_RSP_CODE,
580 .reply_code = REP82_ERROR_MACHINE_FAILURE,
581 };
582 struct response_type *resp_type =
583 (struct response_type *) msg->private;
584 struct type86x_reply *t86r = reply->message;
585 int length;
586
587 /* Copy the reply message to the request message buffer. */
588 if (IS_ERR(reply))
589 memcpy(msg->message, &error_reply, sizeof(error_reply));
590 else if (t86r->hdr.type == TYPE86_RSP_CODE &&
591 t86r->cprbx.cprb_ver_id == 0x02) {
592 switch (resp_type->type) {
593 case PCIXCC_RESPONSE_TYPE_ICA:
594 length = sizeof(struct type86x_reply)
595 + t86r->length - 2;
596 length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
597 memcpy(msg->message, reply->message, length);
598 break;
599 case PCIXCC_RESPONSE_TYPE_XCRB:
600 length = t86r->fmt2.offset2 + t86r->fmt2.count2;
601 length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
602 memcpy(msg->message, reply->message, length);
603 break;
604 default:
605 PRINTK("Invalid internal response type: %i\n",
606 resp_type->type);
607 memcpy(msg->message, &error_reply,
608 sizeof error_reply);
609 }
610 } else
611 memcpy(msg->message, reply->message, sizeof error_reply);
612 complete(&(resp_type->work));
613}
614
615static atomic_t zcrypt_step = ATOMIC_INIT(0);
616
617/**
618 * The request distributor calls this function if it picked the PCIXCC/CEX2C
619 * device to handle a modexpo request.
620 * @zdev: pointer to zcrypt_device structure that identifies the
621 * PCIXCC/CEX2C device to the request distributor
622 * @mex: pointer to the modexpo request buffer
623 */
624static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
625 struct ica_rsa_modexpo *mex)
626{
627 struct ap_message ap_msg;
628 struct response_type resp_type = {
629 .type = PCIXCC_RESPONSE_TYPE_ICA,
630 };
631 int rc;
632
633 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
634 if (!ap_msg.message)
635 return -ENOMEM;
636 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
637 atomic_inc_return(&zcrypt_step);
638 ap_msg.private = &resp_type;
639 rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
640 if (rc)
641 goto out_free;
642 init_completion(&resp_type.work);
643 ap_queue_message(zdev->ap_dev, &ap_msg);
644 rc = wait_for_completion_interruptible_timeout(
645 &resp_type.work, PCIXCC_CLEANUP_TIME);
646 if (rc > 0)
647 rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
648 mex->outputdatalength);
649 else {
650 /* Signal pending or message timed out. */
651 ap_cancel_message(zdev->ap_dev, &ap_msg);
652 if (rc == 0)
653 /* Message timed out. */
654 rc = -ETIME;
655 }
656out_free:
657 free_page((unsigned long) ap_msg.message);
658 return rc;
659}
660
661/**
662 * The request distributor calls this function if it picked the PCIXCC/CEX2C
663 * device to handle a modexpo_crt request.
664 * @zdev: pointer to zcrypt_device structure that identifies the
665 * PCIXCC/CEX2C device to the request distributor
666 * @crt: pointer to the modexpo_crt request buffer
667 */
668static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
669 struct ica_rsa_modexpo_crt *crt)
670{
671 struct ap_message ap_msg;
672 struct response_type resp_type = {
673 .type = PCIXCC_RESPONSE_TYPE_ICA,
674 };
675 int rc;
676
677 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
678 if (!ap_msg.message)
679 return -ENOMEM;
680 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
681 atomic_inc_return(&zcrypt_step);
682 ap_msg.private = &resp_type;
683 rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
684 if (rc)
685 goto out_free;
686 init_completion(&resp_type.work);
687 ap_queue_message(zdev->ap_dev, &ap_msg);
688 rc = wait_for_completion_interruptible_timeout(
689 &resp_type.work, PCIXCC_CLEANUP_TIME);
690 if (rc > 0)
691 rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
692 crt->outputdatalength);
693 else {
694 /* Signal pending or message timed out. */
695 ap_cancel_message(zdev->ap_dev, &ap_msg);
696 if (rc == 0)
697 /* Message timed out. */
698 rc = -ETIME;
699 }
700out_free:
701 free_page((unsigned long) ap_msg.message);
702 return rc;
703}
704
705/**
706 * The request distributor calls this function if it picked the PCIXCC/CEX2C
707 * device to handle a send_cprb request.
708 * @zdev: pointer to zcrypt_device structure that identifies the
709 * PCIXCC/CEX2C device to the request distributor
710 * @xcRB: pointer to the send_cprb request buffer
711 */
712long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB)
713{
714 struct ap_message ap_msg;
715 struct response_type resp_type = {
716 .type = PCIXCC_RESPONSE_TYPE_XCRB,
717 };
718 int rc;
719
720 ap_msg.message = (void *) kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
721 if (!ap_msg.message)
722 return -ENOMEM;
723 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
724 atomic_inc_return(&zcrypt_step);
725 ap_msg.private = &resp_type;
726 rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
727 if (rc)
728 goto out_free;
729 init_completion(&resp_type.work);
730 ap_queue_message(zdev->ap_dev, &ap_msg);
731 rc = wait_for_completion_interruptible_timeout(
732 &resp_type.work, PCIXCC_CLEANUP_TIME);
733 if (rc > 0)
734 rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
735 else {
736 /* Signal pending or message timed out. */
737 ap_cancel_message(zdev->ap_dev, &ap_msg);
738 if (rc == 0)
739 /* Message timed out. */
740 rc = -ETIME;
741 }
742out_free:
743 memset(ap_msg.message, 0x0, ap_msg.length);
744 kfree(ap_msg.message);
745 return rc;
746}
747
748/**
749 * The crypto operations for a PCIXCC/CEX2C card.
750 */
751static struct zcrypt_ops zcrypt_pcixcc_ops = {
752 .rsa_modexpo = zcrypt_pcixcc_modexpo,
753 .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
754 .send_cprb = zcrypt_pcixcc_send_cprb,
755};
756
757/**
758 * Micro-code detection function. It sends a message to a PCIXCC card
759 * to find out the microcode level.
760 * @ap_dev: pointer to the AP device.
761 */
762static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
763{
764 static unsigned char msg[] = {
765 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
766 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
767 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
768 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
769 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
770 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
771 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
772 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
773 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
774 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
775 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
776 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
777 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
778 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
779 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
780 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
781 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
782 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
783 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
784 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
785 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
786 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
787 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
788 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
789 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
790 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
791 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
792 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
793 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
794 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
795 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
796 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
797 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
798 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
799 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
800 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
801 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
802 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
803 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
804 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
805 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
806 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
807 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
808 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
809 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
810 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
811 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
812 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
813 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
814 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
815 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
816 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
817 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
818 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
819 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
820 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
821 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
822 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
823 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
824 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
825 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
826 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
827 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
828 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
829 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
830 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
831 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
832 0xF1,0x3D,0x93,0x53
833 };
834 unsigned long long psmid;
835 struct CPRBX *cprbx;
836 char *reply;
837 int rc, i;
838
839 reply = (void *) get_zeroed_page(GFP_KERNEL);
840 if (!reply)
841 return -ENOMEM;
842
843 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
844 if (rc)
845 goto out_free;
846
847 /* Wait for the test message to complete. */
848 for (i = 0; i < 6; i++) {
849 mdelay(300);
850 rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
851 if (rc == 0 && psmid == 0x0102030405060708ULL)
852 break;
853 }
854
855 if (i >= 6) {
856 /* Got no answer. */
857 rc = -ENODEV;
858 goto out_free;
859 }
860
861 cprbx = (struct CPRBX *) (reply + 48);
862 if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
863 rc = ZCRYPT_PCIXCC_MCL2;
864 else
865 rc = ZCRYPT_PCIXCC_MCL3;
866out_free:
867 free_page((unsigned long) reply);
868 return rc;
869}
870
871/**
872 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
873 * since the bus_match already checked the hardware type. The PCIXCC
874 * cards come in two flavours: micro code level 2 and micro code level 3.
875 * This is checked by sending a test message to the device.
876 * @ap_dev: pointer to the AP device.
877 */
878static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
879{
880 struct zcrypt_device *zdev;
881 int rc;
882
883 zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
884 if (!zdev)
885 return -ENOMEM;
886 zdev->ap_dev = ap_dev;
887 zdev->ops = &zcrypt_pcixcc_ops;
888 zdev->online = 1;
889 if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
890 rc = zcrypt_pcixcc_mcl(ap_dev);
891 if (rc < 0) {
892 zcrypt_device_free(zdev);
893 return rc;
894 }
895 zdev->user_space_type = rc;
896 if (rc == ZCRYPT_PCIXCC_MCL2) {
897 zdev->type_string = "PCIXCC_MCL2";
898 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
899 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
900 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
901 } else {
902 zdev->type_string = "PCIXCC_MCL3";
903 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
904 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
905 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
906 }
907 } else {
908 zdev->user_space_type = ZCRYPT_CEX2C;
909 zdev->type_string = "CEX2C";
910 zdev->speed_rating = CEX2C_SPEED_RATING;
911 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
912 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
913 }
914 ap_dev->reply = &zdev->reply;
915 ap_dev->private = zdev;
916 rc = zcrypt_device_register(zdev);
917 if (rc)
918 goto out_free;
919 return 0;
920
921 out_free:
922 ap_dev->private = NULL;
923 zcrypt_device_free(zdev);
924 return rc;
925}
926
927/**
928 * This is called to remove the extended PCIXCC/CEX2C driver information
929 * if an AP device is removed.
930 */
931static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
932{
933 struct zcrypt_device *zdev = ap_dev->private;
934
935 zcrypt_device_unregister(zdev);
936}
937
938int __init zcrypt_pcixcc_init(void)
939{
940 return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
941}
942
943void zcrypt_pcixcc_exit(void)
944{
945 ap_driver_unregister(&zcrypt_pcixcc_driver);
946}
947
948#ifndef CONFIG_ZCRYPT_MONOLITHIC
949module_init(zcrypt_pcixcc_init);
950module_exit(zcrypt_pcixcc_exit);
951#endif
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
new file mode 100644
index 000000000000..a78ff307fd19
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -0,0 +1,79 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcixcc.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCIXCC_H_
29#define _ZCRYPT_PCIXCC_H_
30
31/**
32 * CPRBX
33 * Note that all shorts and ints are big-endian.
34 * All pointer fields are 16 bytes long; their contents are ignored.
35 *
36 * A request CPRB is followed by a request_parameter_block.
37 *
38 * The request (or reply) parameter block is organized thus:
39 * function code
40 * VUD block
41 * key block
42 */
43struct CPRBX {
44 unsigned short cprb_len; /* CPRB length 220 */
45 unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
46 unsigned char pad_000[3]; /* Alignment pad bytes */
47 unsigned char func_id[2]; /* function id 0x5432 */
48 unsigned char cprb_flags[4]; /* Flags */
49 unsigned int req_parml; /* request parameter buffer len */
50	unsigned int req_datal;	/* request data buffer len */
51 unsigned int rpl_msgbl; /* reply message block length */
52 unsigned int rpld_parml; /* replied parameter block len */
53 unsigned int rpl_datal; /* reply data block len */
54 unsigned int rpld_datal; /* replied data block len */
55 unsigned int req_extbl; /* request extension block len */
56 unsigned char pad_001[4]; /* reserved */
57 unsigned int rpld_extbl; /* replied extension block len */
58 unsigned char req_parmb[16]; /* request parm block 'address' */
59 unsigned char req_datab[16]; /* request data block 'address' */
60 unsigned char rpl_parmb[16]; /* reply parm block 'address' */
61 unsigned char rpl_datab[16]; /* reply data block 'address' */
62 unsigned char req_extb[16]; /* request extension block 'addr'*/
63	unsigned char rpl_extb[16];	/* reply extension block 'address'*/
64 unsigned short ccp_rtcode; /* server return code */
65 unsigned short ccp_rscode; /* server reason code */
66 unsigned int mac_data_len; /* Mac Data Length */
67 unsigned char logon_id[8]; /* Logon Identifier */
68 unsigned char mac_value[8]; /* Mac Value */
69 unsigned char mac_content_flgs;/* Mac content flag byte */
70 unsigned char pad_002; /* Alignment */
71 unsigned short domain; /* Domain */
72 unsigned char pad_003[12]; /* Domain masks */
73 unsigned char pad_004[36]; /* reserved */
74} __attribute__((packed));
75
76int zcrypt_pcixcc_init(void);
77void zcrypt_pcixcc_exit(void);
78
79#endif /* _ZCRYPT_PCIXCC_H_ */
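
The CPRBX comment above describes the reply layout only in prose. As a minimal sketch, assuming kernel context and reusing the 48-byte reply offset that zcrypt_pcixcc_mcl() uses for its test message (not part of the patch), a caller could sanity-check a reply CPRBX like this:

/* Illustrative sketch only: validate a reply CPRBX and its server codes. */
static int check_reply_cprbx(const char *reply)
{
	const struct CPRBX *cprbx = (const struct CPRBX *) (reply + 48);

	if (cprbx->cprb_ver_id != 0x02)
		return -EINVAL;		/* not a CPRBX-style reply */
	if (cprbx->ccp_rtcode != 0 || cprbx->ccp_rscode != 0)
		return -EIO;		/* server return/reason code set */
	return 0;
}
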
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5399c5d99b81..a914129a4da9 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -19,9 +19,6 @@
19 19
20#include "s390mach.h" 20#include "s390mach.h"
21 21
22#define DBG printk
23// #define DBG(args,...) do {} while (0);
24
25static struct semaphore m_sem; 22static struct semaphore m_sem;
26 23
27extern int css_process_crw(int, int); 24extern int css_process_crw(int, int);
@@ -83,11 +80,11 @@ repeat:
83 ccode = stcrw(&crw[chain]); 80 ccode = stcrw(&crw[chain]);
84 if (ccode != 0) 81 if (ccode != 0)
85 break; 82 break;
86 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " 83 printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
87 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 84 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
88 crw[chain].slct, crw[chain].oflw, crw[chain].chn, 85 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
89 crw[chain].rsc, crw[chain].anc, crw[chain].erc, 86 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
90 crw[chain].rsid); 87 crw[chain].rsid);
91 /* Check for overflows. */ 88 /* Check for overflows. */
92 if (crw[chain].oflw) { 89 if (crw[chain].oflw) {
93 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 90 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
@@ -117,8 +114,8 @@ repeat:
117 * reported to the common I/O layer. 114 * reported to the common I/O layer.
118 */ 115 */
119 if (crw[chain].slct) { 116 if (crw[chain].slct) {
120 DBG(KERN_INFO"solicited machine check for " 117 pr_debug("solicited machine check for "
121 "channel path %02X\n", crw[0].rsid); 118 "channel path %02X\n", crw[0].rsid);
122 break; 119 break;
123 } 120 }
124 switch (crw[0].erc) { 121 switch (crw[0].erc) {
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 94d1b74db356..7c84b3d4bd94 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -543,7 +543,7 @@ do { \
543} while (0) 543} while (0)
544 544
545#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL 545#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
546# define ZFCP_LOG_NORMAL(fmt, args...) 546# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0)
547#else 547#else
548# define ZFCP_LOG_NORMAL(fmt, args...) \ 548# define ZFCP_LOG_NORMAL(fmt, args...) \
549do { \ 549do { \
@@ -553,7 +553,7 @@ do { \
553#endif 553#endif
554 554
555#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO 555#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
556# define ZFCP_LOG_INFO(fmt, args...) 556# define ZFCP_LOG_INFO(fmt, args...) do { } while (0)
557#else 557#else
558# define ZFCP_LOG_INFO(fmt, args...) \ 558# define ZFCP_LOG_INFO(fmt, args...) \
559do { \ 559do { \
@@ -563,14 +563,14 @@ do { \
563#endif 563#endif
564 564
565#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG 565#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
566# define ZFCP_LOG_DEBUG(fmt, args...) 566# define ZFCP_LOG_DEBUG(fmt, args...) do { } while (0)
567#else 567#else
568# define ZFCP_LOG_DEBUG(fmt, args...) \ 568# define ZFCP_LOG_DEBUG(fmt, args...) \
569 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args) 569 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
570#endif 570#endif
571 571
572#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE 572#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
573# define ZFCP_LOG_TRACE(fmt, args...) 573# define ZFCP_LOG_TRACE(fmt, args...) do { } while (0)
574#else 574#else
575# define ZFCP_LOG_TRACE(fmt, args...) \ 575# define ZFCP_LOG_TRACE(fmt, args...) \
576 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args) 576 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
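
The point of replacing the empty ZFCP_LOG_* bodies with do { } while (0) is that an empty expansion leaves only a bare null statement at the call site. A minimal stand-alone sketch, not taken from the zfcp sources, of the difference:

/* Hypothetical reduced example, not from zfcp itself. */
#define LOG_EMPTY(fmt, args...)			/* expands to nothing */
#define LOG_NOOP(fmt, args...)	do { } while (0)

static void report(int error)
{
	if (error)
		LOG_EMPTY("error %d\n", error);	/* if-body is just ";", which newer
						 * compilers flag with -Wempty-body */
	if (error)
		LOG_NOOP("error %d\n", error);	/* still one well-formed statement */
}
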
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index d1c1e75bfd60..1e788e815ce7 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -11,19 +11,18 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/ebcdic.h> 12#include <asm/ebcdic.h>
13 13
14struct sysinfo_1_1_1 14struct sysinfo_1_1_1 {
15{
16 char reserved_0[32]; 15 char reserved_0[32];
17 char manufacturer[16]; 16 char manufacturer[16];
18 char type[4]; 17 char type[4];
19 char reserved_1[12]; 18 char reserved_1[12];
20 char model[16]; 19 char model_capacity[16];
21 char sequence[16]; 20 char sequence[16];
22 char plant[4]; 21 char plant[4];
22 char model[16];
23}; 23};
24 24
25struct sysinfo_1_2_1 25struct sysinfo_1_2_1 {
26{
27 char reserved_0[80]; 26 char reserved_0[80];
28 char sequence[16]; 27 char sequence[16];
29 char plant[4]; 28 char plant[4];
@@ -31,9 +30,12 @@ struct sysinfo_1_2_1
31 unsigned short cpu_address; 30 unsigned short cpu_address;
32}; 31};
33 32
34struct sysinfo_1_2_2 33struct sysinfo_1_2_2 {
35{ 34 char format;
36 char reserved_0[32]; 35 char reserved_0[1];
36 unsigned short acc_offset;
37 char reserved_1[24];
38 unsigned int secondary_capability;
37 unsigned int capability; 39 unsigned int capability;
38 unsigned short cpus_total; 40 unsigned short cpus_total;
39 unsigned short cpus_configured; 41 unsigned short cpus_configured;
@@ -42,8 +44,12 @@ struct sysinfo_1_2_2
42 unsigned short adjustment[0]; 44 unsigned short adjustment[0];
43}; 45};
44 46
45struct sysinfo_2_2_1 47struct sysinfo_1_2_2_extension {
46{ 48 unsigned int alt_capability;
49 unsigned short alt_adjustment[0];
50};
51
52struct sysinfo_2_2_1 {
47 char reserved_0[80]; 53 char reserved_0[80];
48 char sequence[16]; 54 char sequence[16];
49 char plant[4]; 55 char plant[4];
@@ -51,15 +57,11 @@ struct sysinfo_2_2_1
51 unsigned short cpu_address; 57 unsigned short cpu_address;
52}; 58};
53 59
54struct sysinfo_2_2_2 60struct sysinfo_2_2_2 {
55{
56 char reserved_0[32]; 61 char reserved_0[32];
57 unsigned short lpar_number; 62 unsigned short lpar_number;
58 char reserved_1; 63 char reserved_1;
59 unsigned char characteristics; 64 unsigned char characteristics;
60 #define LPAR_CHAR_DEDICATED (1 << 7)
61 #define LPAR_CHAR_SHARED (1 << 6)
62 #define LPAR_CHAR_LIMITED (1 << 5)
63 unsigned short cpus_total; 65 unsigned short cpus_total;
64 unsigned short cpus_configured; 66 unsigned short cpus_configured;
65 unsigned short cpus_standby; 67 unsigned short cpus_standby;
@@ -71,12 +73,14 @@ struct sysinfo_2_2_2
71 unsigned short cpus_shared; 73 unsigned short cpus_shared;
72}; 74};
73 75
74struct sysinfo_3_2_2 76#define LPAR_CHAR_DEDICATED (1 << 7)
75{ 77#define LPAR_CHAR_SHARED (1 << 6)
78#define LPAR_CHAR_LIMITED (1 << 5)
79
80struct sysinfo_3_2_2 {
76 char reserved_0[31]; 81 char reserved_0[31];
77 unsigned char count; 82 unsigned char count;
78 struct 83 struct {
79 {
80 char reserved_0[4]; 84 char reserved_0[4];
81 unsigned short cpus_total; 85 unsigned short cpus_total;
82 unsigned short cpus_configured; 86 unsigned short cpus_configured;
@@ -90,136 +94,223 @@ struct sysinfo_3_2_2
90 } vm[8]; 94 } vm[8];
91}; 95};
92 96
93union s390_sysinfo 97static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
94{ 98{
95 struct sysinfo_1_1_1 sysinfo_1_1_1; 99 register int r0 asm("0") = (fc << 28) | sel1;
96 struct sysinfo_1_2_1 sysinfo_1_2_1; 100 register int r1 asm("1") = sel2;
97 struct sysinfo_1_2_2 sysinfo_1_2_2; 101
98 struct sysinfo_2_2_1 sysinfo_2_2_1; 102 asm volatile(
99 struct sysinfo_2_2_2 sysinfo_2_2_2; 103 " stsi 0(%2)\n"
100 struct sysinfo_3_2_2 sysinfo_3_2_2; 104 "0: jz 2f\n"
101}; 105 "1: lhi %0,%3\n"
102 106 "2:\n"
103static inline int stsi (void *sysinfo, 107 EX_TABLE(0b,1b)
104 int fc, int sel1, int sel2) 108 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
105{ 109 : "cc", "memory" );
106 int cc, retv; 110 return r0;
107
108#ifndef CONFIG_64BIT
109 __asm__ __volatile__ ( "lr\t0,%2\n"
110 "\tlr\t1,%3\n"
111 "\tstsi\t0(%4)\n"
112 "0:\tipm\t%0\n"
113 "\tsrl\t%0,28\n"
114 "1:lr\t%1,0\n"
115 ".section .fixup,\"ax\"\n"
116 "2:\tlhi\t%0,3\n"
117 "\tbras\t1,3f\n"
118 "\t.long 1b\n"
119 "3:\tl\t1,0(1)\n"
120 "\tbr\t1\n"
121 ".previous\n"
122 ".section __ex_table,\"a\"\n"
123 "\t.align 4\n"
124 "\t.long 0b,2b\n"
125 ".previous\n"
126 : "=d" (cc), "=d" (retv)
127 : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
128 : "cc", "memory", "0", "1" );
129#else
130 __asm__ __volatile__ ( "lr\t0,%2\n"
131 "lr\t1,%3\n"
132 "\tstsi\t0(%4)\n"
133 "0:\tipm\t%0\n"
134 "\tsrl\t%0,28\n"
135 "1:lr\t%1,0\n"
136 ".section .fixup,\"ax\"\n"
137 "2:\tlhi\t%0,3\n"
138 "\tjg\t1b\n"
139 ".previous\n"
140 ".section __ex_table,\"a\"\n"
141 "\t.align 8\n"
142 "\t.quad 0b,2b\n"
143 ".previous\n"
144 : "=d" (cc), "=d" (retv)
145 : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
146 : "cc", "memory", "0", "1" );
147#endif
148
149 return cc? -1 : retv;
150} 111}
151 112
152static inline int stsi_0 (void) 113static inline int stsi_0(void)
153{ 114{
154 int rc = stsi (NULL, 0, 0, 0); 115 int rc = stsi (NULL, 0, 0, 0);
155 return rc == -1 ? rc : (((unsigned int)rc) >> 28); 116 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
156} 117}
157 118
158static inline int stsi_1_1_1 (struct sysinfo_1_1_1 *info) 119static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
159{ 120{
160 int rc = stsi (info, 1, 1, 1); 121 if (stsi(info, 1, 1, 1) == -ENOSYS)
161 if (rc != -1) 122 return len;
162 { 123
163 EBCASC (info->manufacturer, sizeof(info->manufacturer)); 124 EBCASC(info->manufacturer, sizeof(info->manufacturer));
164 EBCASC (info->type, sizeof(info->type)); 125 EBCASC(info->type, sizeof(info->type));
165 EBCASC (info->model, sizeof(info->model)); 126 EBCASC(info->model, sizeof(info->model));
166 EBCASC (info->sequence, sizeof(info->sequence)); 127 EBCASC(info->sequence, sizeof(info->sequence));
167 EBCASC (info->plant, sizeof(info->plant)); 128 EBCASC(info->plant, sizeof(info->plant));
168 } 129 EBCASC(info->model_capacity, sizeof(info->model_capacity));
169 return rc == -1 ? rc : 0; 130 len += sprintf(page + len, "Manufacturer: %-16.16s\n",
131 info->manufacturer);
132 len += sprintf(page + len, "Type: %-4.4s\n",
133 info->type);
134 if (info->model[0] != '\0')
135 /*
136 * Sigh: the model field has been renamed with System z9
137 * to model_capacity and a new model field has been added
138 * after the plant field. To avoid confusing older programs,
139 * the "Model:" line prints "model_capacity model" or just
140 * "model_capacity" if the model string is empty.
141 */
142 len += sprintf(page + len,
143 "Model: %-16.16s %-16.16s\n",
144 info->model_capacity, info->model);
145 else
146 len += sprintf(page + len, "Model: %-16.16s\n",
147 info->model_capacity);
148 len += sprintf(page + len, "Sequence Code: %-16.16s\n",
149 info->sequence);
150 len += sprintf(page + len, "Plant: %-4.4s\n",
151 info->plant);
152 len += sprintf(page + len, "Model Capacity: %-16.16s\n",
153 info->model_capacity);
154 return len;
170} 155}
171 156
172static inline int stsi_1_2_1 (struct sysinfo_1_2_1 *info) 157#if 0 /* Currently unused */
158static int stsi_1_2_1(struct sysinfo_1_2_1 *info, char *page, int len)
173{ 159{
174 int rc = stsi (info, 1, 2, 1); 160 if (stsi(info, 1, 2, 1) == -ENOSYS)
175 if (rc != -1) 161 return len;
176 { 162
177 EBCASC (info->sequence, sizeof(info->sequence)); 163 len += sprintf(page + len, "\n");
178 EBCASC (info->plant, sizeof(info->plant)); 164 EBCASC(info->sequence, sizeof(info->sequence));
179 } 165 EBCASC(info->plant, sizeof(info->plant));
180 return rc == -1 ? rc : 0; 166 len += sprintf(page + len, "Sequence Code of CPU: %-16.16s\n",
167 info->sequence);
168 len += sprintf(page + len, "Plant of CPU: %-16.16s\n",
169 info->plant);
170 return len;
181} 171}
172#endif
182 173
183static inline int stsi_1_2_2 (struct sysinfo_1_2_2 *info) 174static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
184{ 175{
185 int rc = stsi (info, 1, 2, 2); 176 struct sysinfo_1_2_2_extension *ext;
186 return rc == -1 ? rc : 0; 177 int i;
178
179 if (stsi(info, 1, 2, 2) == -ENOSYS)
180 return len;
181 ext = (struct sysinfo_1_2_2_extension *)
182 ((unsigned long) info + info->acc_offset);
183
184 len += sprintf(page + len, "\n");
185 len += sprintf(page + len, "CPUs Total: %d\n",
186 info->cpus_total);
187 len += sprintf(page + len, "CPUs Configured: %d\n",
188 info->cpus_configured);
189 len += sprintf(page + len, "CPUs Standby: %d\n",
190 info->cpus_standby);
191 len += sprintf(page + len, "CPUs Reserved: %d\n",
192 info->cpus_reserved);
193
194 if (info->format == 1) {
195 /*
196 * Sigh 2. According to the specification the alternate
197 * capability field is a 32 bit floating point number
198 * if the higher order 8 bits are not zero. Printing
199 * a floating point number in the kernel is a no-no,
200 * so always print the number as a 32 bit unsigned integer.
201 * User space needs to know about the strange
202 * encoding of the alternate cpu capability.
203 */
204 len += sprintf(page + len, "Capability: %u %u\n",
205 info->capability, ext->alt_capability);
206 for (i = 2; i <= info->cpus_total; i++)
207 len += sprintf(page + len,
208 "Adjustment %02d-way: %u %u\n",
209 i, info->adjustment[i-2],
210 ext->alt_adjustment[i-2]);
211
212 } else {
213 len += sprintf(page + len, "Capability: %u\n",
214 info->capability);
215 for (i = 2; i <= info->cpus_total; i++)
216 len += sprintf(page + len,
217 "Adjustment %02d-way: %u\n",
218 i, info->adjustment[i-2]);
219 }
220
221 if (info->secondary_capability != 0)
222 len += sprintf(page + len, "Secondary Capability: %d\n",
223 info->secondary_capability);
224
225 return len;
187} 226}
188 227
189static inline int stsi_2_2_1 (struct sysinfo_2_2_1 *info) 228#if 0 /* Currently unused */
229static int stsi_2_2_1(struct sysinfo_2_2_1 *info, char *page, int len)
190{ 230{
191 int rc = stsi (info, 2, 2, 1); 231 if (stsi(info, 2, 2, 1) == -ENOSYS)
192 if (rc != -1) 232 return len;
193 { 233
194 EBCASC (info->sequence, sizeof(info->sequence)); 234 len += sprintf(page + len, "\n");
195 EBCASC (info->plant, sizeof(info->plant)); 235 EBCASC (info->sequence, sizeof(info->sequence));
196 } 236 EBCASC (info->plant, sizeof(info->plant));
197 return rc == -1 ? rc : 0; 237 len += sprintf(page + len, "Sequence Code of logical CPU: %-16.16s\n",
238 info->sequence);
239 len += sprintf(page + len, "Plant of logical CPU: %-16.16s\n",
240 info->plant);
241 return len;
198} 242}
243#endif
199 244
200static inline int stsi_2_2_2 (struct sysinfo_2_2_2 *info) 245static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
201{ 246{
202 int rc = stsi (info, 2, 2, 2); 247 if (stsi(info, 2, 2, 2) == -ENOSYS)
203 if (rc != -1) 248 return len;
204 { 249
205 EBCASC (info->name, sizeof(info->name)); 250 EBCASC (info->name, sizeof(info->name));
206 } 251
207 return rc == -1 ? rc : 0; 252 len += sprintf(page + len, "\n");
253 len += sprintf(page + len, "LPAR Number: %d\n",
254 info->lpar_number);
255
256 len += sprintf(page + len, "LPAR Characteristics: ");
257 if (info->characteristics & LPAR_CHAR_DEDICATED)
258 len += sprintf(page + len, "Dedicated ");
259 if (info->characteristics & LPAR_CHAR_SHARED)
260 len += sprintf(page + len, "Shared ");
261 if (info->characteristics & LPAR_CHAR_LIMITED)
262 len += sprintf(page + len, "Limited ");
263 len += sprintf(page + len, "\n");
264
265 len += sprintf(page + len, "LPAR Name: %-8.8s\n",
266 info->name);
267
268 len += sprintf(page + len, "LPAR Adjustment: %d\n",
269 info->caf);
270
271 len += sprintf(page + len, "LPAR CPUs Total: %d\n",
272 info->cpus_total);
273 len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
274 info->cpus_configured);
275 len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
276 info->cpus_standby);
277 len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
278 info->cpus_reserved);
279 len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
280 info->cpus_dedicated);
281 len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
282 info->cpus_shared);
283 return len;
208} 284}
209 285
210static inline int stsi_3_2_2 (struct sysinfo_3_2_2 *info) 286static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
211{ 287{
212 int rc = stsi (info, 3, 2, 2); 288 int i;
213 if (rc != -1) 289
214 { 290 if (stsi(info, 3, 2, 2) == -ENOSYS)
215 int i; 291 return len;
216 for (i = 0; i < info->count; i++) 292 for (i = 0; i < info->count; i++) {
217 { 293 EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
218 EBCASC (info->vm[i].name, sizeof(info->vm[i].name)); 294 EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
219 EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi)); 295 len += sprintf(page + len, "\n");
220 } 296 len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
297 i, info->vm[i].name);
298 len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
299 i, info->vm[i].cpi);
300
301 len += sprintf(page + len, "VM%02d Adjustment: %d\n",
302 i, info->vm[i].caf);
303
304 len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
305 i, info->vm[i].cpus_total);
306 len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
307 i, info->vm[i].cpus_configured);
308 len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
309 i, info->vm[i].cpus_standby);
310 len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
311 i, info->vm[i].cpus_reserved);
221 } 312 }
222 return rc == -1 ? rc : 0; 313 return len;
223} 314}
224 315
225 316
@@ -227,118 +318,34 @@ static int proc_read_sysinfo(char *page, char **start,
227 off_t off, int count, 318 off_t off, int count,
228 int *eof, void *data) 319 int *eof, void *data)
229{ 320{
230 unsigned long info_page = get_zeroed_page (GFP_KERNEL); 321 unsigned long info = get_zeroed_page (GFP_KERNEL);
231 union s390_sysinfo *info = (union s390_sysinfo *) info_page; 322 int level, len;
232 int len = 0;
233 int level;
234 int i;
235 323
236 if (!info) 324 if (!info)
237 return 0; 325 return 0;
238 326
239 level = stsi_0 (); 327 len = 0;
240 328 level = stsi_0();
241 if (level >= 1 && stsi_1_1_1 (&info->sysinfo_1_1_1) == 0) 329 if (level >= 1)
242 { 330 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
243 len += sprintf (page+len, "Manufacturer: %-16.16s\n",
244 info->sysinfo_1_1_1.manufacturer);
245 len += sprintf (page+len, "Type: %-4.4s\n",
246 info->sysinfo_1_1_1.type);
247 len += sprintf (page+len, "Model: %-16.16s\n",
248 info->sysinfo_1_1_1.model);
249 len += sprintf (page+len, "Sequence Code: %-16.16s\n",
250 info->sysinfo_1_1_1.sequence);
251 len += sprintf (page+len, "Plant: %-4.4s\n",
252 info->sysinfo_1_1_1.plant);
253 }
254
255 if (level >= 1 && stsi_1_2_2 (&info->sysinfo_1_2_2) == 0)
256 {
257 len += sprintf (page+len, "\n");
258 len += sprintf (page+len, "CPUs Total: %d\n",
259 info->sysinfo_1_2_2.cpus_total);
260 len += sprintf (page+len, "CPUs Configured: %d\n",
261 info->sysinfo_1_2_2.cpus_configured);
262 len += sprintf (page+len, "CPUs Standby: %d\n",
263 info->sysinfo_1_2_2.cpus_standby);
264 len += sprintf (page+len, "CPUs Reserved: %d\n",
265 info->sysinfo_1_2_2.cpus_reserved);
266
267 len += sprintf (page+len, "Capability: %d\n",
268 info->sysinfo_1_2_2.capability);
269 331
270 for (i = 2; i <= info->sysinfo_1_2_2.cpus_total; i++) 332 if (level >= 1)
271 len += sprintf (page+len, "Adjustment %02d-way: %d\n", 333 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
272 i, info->sysinfo_1_2_2.adjustment[i-2]);
273 }
274 334
275 if (level >= 2 && stsi_2_2_2 (&info->sysinfo_2_2_2) == 0) 335 if (level >= 2)
276 { 336 len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
277 len += sprintf (page+len, "\n");
278 len += sprintf (page+len, "LPAR Number: %d\n",
279 info->sysinfo_2_2_2.lpar_number);
280
281 len += sprintf (page+len, "LPAR Characteristics: ");
282 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_DEDICATED)
283 len += sprintf (page+len, "Dedicated ");
284 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_SHARED)
285 len += sprintf (page+len, "Shared ");
286 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_LIMITED)
287 len += sprintf (page+len, "Limited ");
288 len += sprintf (page+len, "\n");
289
290 len += sprintf (page+len, "LPAR Name: %-8.8s\n",
291 info->sysinfo_2_2_2.name);
292
293 len += sprintf (page+len, "LPAR Adjustment: %d\n",
294 info->sysinfo_2_2_2.caf);
295
296 len += sprintf (page+len, "LPAR CPUs Total: %d\n",
297 info->sysinfo_2_2_2.cpus_total);
298 len += sprintf (page+len, "LPAR CPUs Configured: %d\n",
299 info->sysinfo_2_2_2.cpus_configured);
300 len += sprintf (page+len, "LPAR CPUs Standby: %d\n",
301 info->sysinfo_2_2_2.cpus_standby);
302 len += sprintf (page+len, "LPAR CPUs Reserved: %d\n",
303 info->sysinfo_2_2_2.cpus_reserved);
304 len += sprintf (page+len, "LPAR CPUs Dedicated: %d\n",
305 info->sysinfo_2_2_2.cpus_dedicated);
306 len += sprintf (page+len, "LPAR CPUs Shared: %d\n",
307 info->sysinfo_2_2_2.cpus_shared);
308 }
309 337
310 if (level >= 3 && stsi_3_2_2 (&info->sysinfo_3_2_2) == 0) 338 if (level >= 3)
311 { 339 len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
312 for (i = 0; i < info->sysinfo_3_2_2.count; i++)
313 {
314 len += sprintf (page+len, "\n");
315 len += sprintf (page+len, "VM%02d Name: %-8.8s\n",
316 i, info->sysinfo_3_2_2.vm[i].name);
317 len += sprintf (page+len, "VM%02d Control Program: %-16.16s\n",
318 i, info->sysinfo_3_2_2.vm[i].cpi);
319
320 len += sprintf (page+len, "VM%02d Adjustment: %d\n",
321 i, info->sysinfo_3_2_2.vm[i].caf);
322
323 len += sprintf (page+len, "VM%02d CPUs Total: %d\n",
324 i, info->sysinfo_3_2_2.vm[i].cpus_total);
325 len += sprintf (page+len, "VM%02d CPUs Configured: %d\n",
326 i, info->sysinfo_3_2_2.vm[i].cpus_configured);
327 len += sprintf (page+len, "VM%02d CPUs Standby: %d\n",
328 i, info->sysinfo_3_2_2.vm[i].cpus_standby);
329 len += sprintf (page+len, "VM%02d CPUs Reserved: %d\n",
330 i, info->sysinfo_3_2_2.vm[i].cpus_reserved);
331 }
332 }
333 340
334 free_page (info_page); 341 free_page (info);
335 return len; 342 return len;
336} 343}
337 344
338static __init int create_proc_sysinfo(void) 345static __init int create_proc_sysinfo(void)
339{ 346{
340 create_proc_read_entry ("sysinfo", 0444, NULL, 347 create_proc_read_entry("sysinfo", 0444, NULL,
341 proc_read_sysinfo, NULL); 348 proc_read_sysinfo, NULL);
342 return 0; 349 return 0;
343} 350}
344 351
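
A minimal sketch of how the reworked sysinfo helpers combine, mirroring what proc_read_sysinfo() does above with the buffer handling reduced to a minimum (illustration only, not part of the patch):

/* Query STSI 1.1.1 and log the machine type. */
static void show_machine_type(void)
{
	struct sysinfo_1_1_1 *info;

	info = (struct sysinfo_1_1_1 *) get_zeroed_page(GFP_KERNEL);
	if (!info)
		return;
	if (stsi(info, 1, 1, 1) != -ENOSYS) {
		EBCASC(info->type, sizeof(info->type));
		printk(KERN_INFO "machine type: %-4.4s\n", info->type);
	}
	free_page((unsigned long) info);
}
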
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
index ed8955f49e47..979145026a29 100644
--- a/include/asm-s390/Kbuild
+++ b/include/asm-s390/Kbuild
@@ -1,4 +1,4 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3unifdef-y += cmb.h debug.h 3unifdef-y += cmb.h debug.h
4header-y += dasd.h qeth.h tape390.h ucontext.h vtoc.h z90crypt.h 4header-y += dasd.h monwriter.h qeth.h tape390.h ucontext.h vtoc.h z90crypt.h
diff --git a/include/asm-s390/appldata.h b/include/asm-s390/appldata.h
new file mode 100644
index 000000000000..b1770703b706
--- /dev/null
+++ b/include/asm-s390/appldata.h
@@ -0,0 +1,90 @@
1/*
2 * include/asm-s390/appldata.h
3 *
4 * Copyright (C) IBM Corp. 2006
5 *
6 * Author(s): Melissa Howland <melissah@us.ibm.com>
7 */
8
9#ifndef _ASM_S390_APPLDATA_H
10#define _ASM_S390_APPLDATA_H
11
12#include <asm/io.h>
13
14#ifndef CONFIG_64BIT
15
16#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
17#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
18#define APPLDATA_GEN_EVENT_REC 0x02
19#define APPLDATA_START_CONFIG_REC 0x03
20
21/*
22 * Parameter list for DIAGNOSE X'DC'
23 */
24struct appldata_parameter_list {
25 u16 diag; /* The DIAGNOSE code X'00DC' */
26 u8 function; /* The function code for the DIAGNOSE */
27 u8 parlist_length; /* Length of the parameter list */
28 u32 product_id_addr; /* Address of the 16-byte product ID */
29 u16 reserved;
30 u16 buffer_length; /* Length of the application data buffer */
31 u32 buffer_addr; /* Address of the application data buffer */
32} __attribute__ ((packed));
33
34#else /* CONFIG_64BIT */
35
36#define APPLDATA_START_INTERVAL_REC 0x80
37#define APPLDATA_STOP_REC 0x81
38#define APPLDATA_GEN_EVENT_REC 0x82
39#define APPLDATA_START_CONFIG_REC 0x83
40
41/*
42 * Parameter list for DIAGNOSE X'DC'
43 */
44struct appldata_parameter_list {
45 u16 diag;
46 u8 function;
47 u8 parlist_length;
48 u32 unused01;
49 u16 reserved;
50 u16 buffer_length;
51 u32 unused02;
52 u64 product_id_addr;
53 u64 buffer_addr;
54} __attribute__ ((packed));
55
56#endif /* CONFIG_64BIT */
57
58struct appldata_product_id {
59 char prod_nr[7]; /* product number */
60 u16 prod_fn; /* product function */
61 u8 record_nr; /* record number */
62 u16 version_nr; /* version */
63 u16 release_nr; /* release */
64 u16 mod_lvl; /* modification level */
65} __attribute__ ((packed));
66
67static inline int appldata_asm(struct appldata_product_id *id,
68 unsigned short fn, void *buffer,
69 unsigned short length)
70{
71 struct appldata_parameter_list parm_list;
72 int ry;
73
74 if (!MACHINE_IS_VM)
75 return -ENOSYS;
76 parm_list.diag = 0xdc;
77 parm_list.function = fn;
78 parm_list.parlist_length = sizeof(parm_list);
79 parm_list.buffer_length = length;
80 parm_list.product_id_addr = (unsigned long) id;
81 parm_list.buffer_addr = virt_to_phys(buffer);
82 asm volatile(
83 "diag %1,%0,0xdc"
84 : "=d" (ry)
85 : "d" (&parm_list), "m" (parm_list), "m" (*id)
86 : "cc");
87 return ry;
88}
89
90#endif /* _ASM_S390_APPLDATA_H */
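
A hedged usage sketch for the new appldata_asm() helper; the product id values below are invented purely for illustration:

/* Illustration only: start interval recording for a made-up product id. */
static int start_example_appldata(void *buf, unsigned short len)
{
	struct appldata_product_id id = {
		.prod_nr    = {'E', 'X', 'A', 'M', 'P', 'L', 'E'},	/* invented */
		.prod_fn    = 0x0001,					/* invented */
		.record_nr  = 1,
		.version_nr = 1,
		.release_nr = 0,
		.mod_lvl    = 0,
	};

	return appldata_asm(&id, APPLDATA_START_INTERVAL_REC, buf, len);
}
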
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 28fdd6e2b8ba..da063cd5f0a0 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -270,6 +270,11 @@ struct diag210 {
270 __u32 vrdccrft : 8; /* real device feature (output) */ 270 __u32 vrdccrft : 8; /* real device feature (output) */
271} __attribute__ ((packed,aligned(4))); 271} __attribute__ ((packed,aligned(4)));
272 272
273struct ccw_dev_id {
274 u8 ssid;
275 u16 devno;
276};
277
273extern int diag210(struct diag210 *addr); 278extern int diag210(struct diag210 *addr);
274 279
275extern void wait_cons_dev(void); 280extern void wait_cons_dev(void);
@@ -280,6 +285,8 @@ extern void cio_reset_channel_paths(void);
280 285
281extern void css_schedule_reprobe(void); 286extern void css_schedule_reprobe(void);
282 287
288extern void reipl_ccw_dev(struct ccw_dev_id *id);
289
283#endif 290#endif
284 291
285#endif 292#endif
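
A hedged sketch of the calling convention for the new ccw_dev_id/reipl_ccw_dev() pair; the device number is invented and the no-return behaviour is an assumption:

/* Illustration only: request a re-IPL from CCW device 0.0.4711 (invented). */
static void reipl_from_ccw_example(void)
{
	struct ccw_dev_id dev_id = {
		.ssid  = 0,
		.devno = 0x4711,
	};

	reipl_ccw_dev(&dev_id);	/* presumably does not return on success */
}
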
diff --git a/include/asm-s390/dma.h b/include/asm-s390/dma.h
index 02720c449cd8..7425c6af6cd4 100644
--- a/include/asm-s390/dma.h
+++ b/include/asm-s390/dma.h
@@ -11,6 +11,6 @@
11 11
12#define MAX_DMA_ADDRESS 0x80000000 12#define MAX_DMA_ADDRESS 0x80000000
13 13
14#define free_dma(x) 14#define free_dma(x) do { } while (0)
15 15
16#endif /* _ASM_DMA_H */ 16#endif /* _ASM_DMA_H */
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index ffedf14f89f6..5e261e1de671 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -7,75 +7,21 @@
7#include <asm/errno.h> 7#include <asm/errno.h>
8#include <asm/uaccess.h> 8#include <asm/uaccess.h>
9 9
10#ifndef __s390x__
11#define __futex_atomic_fixup \
12 ".section __ex_table,\"a\"\n" \
13 " .align 4\n" \
14 " .long 0b,4b,2b,4b,3b,4b\n" \
15 ".previous"
16#else /* __s390x__ */
17#define __futex_atomic_fixup \
18 ".section __ex_table,\"a\"\n" \
19 " .align 8\n" \
20 " .quad 0b,4b,2b,4b,3b,4b\n" \
21 ".previous"
22#endif /* __s390x__ */
23
24#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
25 asm volatile(" sacf 256\n" \
26 "0: l %1,0(%6)\n" \
27 "1: " insn \
28 "2: cs %1,%2,0(%6)\n" \
29 "3: jl 1b\n" \
30 " lhi %0,0\n" \
31 "4: sacf 0\n" \
32 __futex_atomic_fixup \
33 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
34 "=m" (*uaddr) \
35 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
36 "m" (*uaddr) : "cc" );
37
38static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) 10static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
39{ 11{
40 int op = (encoded_op >> 28) & 7; 12 int op = (encoded_op >> 28) & 7;
41 int cmp = (encoded_op >> 24) & 15; 13 int cmp = (encoded_op >> 24) & 15;
42 int oparg = (encoded_op << 8) >> 20; 14 int oparg = (encoded_op << 8) >> 20;
43 int cmparg = (encoded_op << 20) >> 20; 15 int cmparg = (encoded_op << 20) >> 20;
44 int oldval = 0, newval, ret; 16 int oldval, ret;
17
45 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
46 oparg = 1 << oparg; 19 oparg = 1 << oparg;
47 20
48 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) 21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
49 return -EFAULT; 22 return -EFAULT;
50 23
51 inc_preempt_count(); 24 ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
52
53 switch (op) {
54 case FUTEX_OP_SET:
55 __futex_atomic_op("lr %2,%5\n",
56 ret, oldval, newval, uaddr, oparg);
57 break;
58 case FUTEX_OP_ADD:
59 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
60 ret, oldval, newval, uaddr, oparg);
61 break;
62 case FUTEX_OP_OR:
63 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
64 ret, oldval, newval, uaddr, oparg);
65 break;
66 case FUTEX_OP_ANDN:
67 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
68 ret, oldval, newval, uaddr, oparg);
69 break;
70 case FUTEX_OP_XOR:
71 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
72 ret, oldval, newval, uaddr, oparg);
73 break;
74 default:
75 ret = -ENOSYS;
76 }
77
78 dec_preempt_count();
79 25
80 if (!ret) { 26 if (!ret) {
81 switch (cmp) { 27 switch (cmp) {
@@ -91,32 +37,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
91 return ret; 37 return ret;
92} 38}
93 39
94static inline int 40static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
95futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) 41 int oldval, int newval)
96{ 42{
97 int ret;
98
99 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) 43 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
100 return -EFAULT; 44 return -EFAULT;
101 asm volatile(" sacf 256\n" 45
102 " cs %1,%4,0(%5)\n" 46 return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
103 "0: lr %0,%1\n"
104 "1: sacf 0\n"
105#ifndef __s390x__
106 ".section __ex_table,\"a\"\n"
107 " .align 4\n"
108 " .long 0b,1b\n"
109 ".previous"
110#else /* __s390x__ */
111 ".section __ex_table,\"a\"\n"
112 " .align 8\n"
113 " .quad 0b,1b\n"
114 ".previous"
115#endif /* __s390x__ */
116 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
117 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
118 : "cc", "memory" );
119 return oldval;
120} 47}
121 48
122#endif /* __KERNEL__ */ 49#endif /* __KERNEL__ */
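
The encoded_op decoding at the top of futex_atomic_op_inuser() is unchanged by this patch; for reference, a small sketch that splits the fields exactly the way the function above does (bit layout inferred from those shifts):

/* Illustration only: decode an encoded futex operation word. */
static void decode_futex_op(int encoded_op)
{
	int op     = (encoded_op >> 28) & 7;	/* bits 28..30: operation     */
	int cmp    = (encoded_op >> 24) & 15;	/* bits 24..27: comparison    */
	int oparg  = (encoded_op << 8) >> 20;	/* bits 12..23, sign-extended */
	int cmparg = (encoded_op << 20) >> 20;	/* bits  0..11, sign-extended */

	printk(KERN_DEBUG "op=%d cmp=%d oparg=%d cmparg=%d\n",
	       op, cmp, oparg, cmparg);
}
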
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
index d4614b35f423..a6cc27e77007 100644
--- a/include/asm-s390/io.h
+++ b/include/asm-s390/io.h
@@ -116,7 +116,7 @@ extern void iounmap(void *addr);
116#define outb(x,addr) ((void) writeb(x,addr)) 116#define outb(x,addr) ((void) writeb(x,addr))
117#define outb_p(x,addr) outb(x,addr) 117#define outb_p(x,addr) outb(x,addr)
118 118
119#define mmiowb() 119#define mmiowb() do { } while (0)
120 120
121/* 121/*
122 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 122 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h
new file mode 100644
index 000000000000..40cc68025e01
--- /dev/null
+++ b/include/asm-s390/kdebug.h
@@ -0,0 +1,59 @@
1#ifndef _S390_KDEBUG_H
2#define _S390_KDEBUG_H
3
4/*
5 * Feb 2006 Ported to s390 <grundym@us.ibm.com>
6 */
7#include <linux/notifier.h>
8
9struct pt_regs;
10
11struct die_args {
12 struct pt_regs *regs;
13 const char *str;
14 long err;
15 int trapnr;
16 int signr;
17};
18
19/* Note - you should never unregister because that can race with NMIs.
20 * If you really want to do it, first unregister, then call
21 * synchronize_sched(), then free.
22 */
23extern int register_die_notifier(struct notifier_block *);
24extern int unregister_die_notifier(struct notifier_block *);
25extern int register_page_fault_notifier(struct notifier_block *);
26extern int unregister_page_fault_notifier(struct notifier_block *);
27extern struct atomic_notifier_head s390die_chain;
28
29
30enum die_val {
31 DIE_OOPS = 1,
32 DIE_BPT,
33 DIE_SSTEP,
34 DIE_PANIC,
35 DIE_NMI,
36 DIE_DIE,
37 DIE_NMIWATCHDOG,
38 DIE_KERNELDEBUG,
39 DIE_TRAP,
40 DIE_GPF,
41 DIE_CALL,
42 DIE_NMI_IPI,
43 DIE_PAGE_FAULT,
44};
45
46static inline int notify_die(enum die_val val, const char *str,
47 struct pt_regs *regs, long err, int trap, int sig)
48{
49 struct die_args args = {
50 .regs = regs,
51 .str = str,
52 .err = err,
53 .trapnr = trap,
54 .signr = sig
55 };
56 return atomic_notifier_call_chain(&s390die_chain, val, &args);
57}
58
59#endif
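
A minimal sketch of a client of the new s390 die-notifier chain; the handler name and the action taken are invented for illustration (kprobes is the intended in-tree user):

/* Illustration only: minimal die-notifier client. */
static int example_die_handler(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_BPT)
		printk(KERN_DEBUG "breakpoint trap, err=%ld\n", args->err);
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_die_handler,
};

static int __init example_init(void)
{
	return register_die_notifier(&example_die_nb);
}
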
diff --git a/include/asm-s390/kprobes.h b/include/asm-s390/kprobes.h
new file mode 100644
index 000000000000..b847ff0ec3fa
--- /dev/null
+++ b/include/asm-s390/kprobes.h
@@ -0,0 +1,114 @@
1#ifndef _ASM_S390_KPROBES_H
2#define _ASM_S390_KPROBES_H
3/*
4 * Kernel Probes (KProbes)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright (C) IBM Corporation, 2002, 2006
21 *
22 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
23 * Probes initial implementation ( includes suggestions from
24 * Rusty Russell).
25 * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli
26 * <ananth@in.ibm.com>
27 * 2005-Dec Used as a template for s390 by Mike Grundy
28 * <grundym@us.ibm.com>
29 */
30#include <linux/types.h>
31#include <linux/ptrace.h>
32#include <linux/percpu.h>
33
34#define __ARCH_WANT_KPROBES_INSN_SLOT
35struct pt_regs;
36struct kprobe;
37
38typedef u16 kprobe_opcode_t;
39#define BREAKPOINT_INSTRUCTION 0x0002
40
41/* Maximum instruction size is 3 (16bit) halfwords: */
42#define MAX_INSN_SIZE 0x0003
43#define MAX_STACK_SIZE 64
44#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
45 (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
46 ? (MAX_STACK_SIZE) \
47 : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
48
49#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)(pentry)
50
51#define ARCH_SUPPORTS_KRETPROBES
52#define ARCH_INACTIVE_KPROBE_COUNT 0
53
54#define KPROBE_SWAP_INST 0x10
55
56#define FIXUP_PSW_NORMAL 0x08
57#define FIXUP_BRANCH_NOT_TAKEN 0x04
58#define FIXUP_RETURN_REGISTER 0x02
59#define FIXUP_NOT_REQUIRED 0x01
60
61/* Architecture specific copy of original instruction */
62struct arch_specific_insn {
63 /* copy of original instruction */
64 kprobe_opcode_t *insn;
65 int fixup;
66 int ilen;
67 int reg;
68};
69
70struct ins_replace_args {
71 kprobe_opcode_t *ptr;
72 kprobe_opcode_t old;
73 kprobe_opcode_t new;
74};
75struct prev_kprobe {
76 struct kprobe *kp;
77 unsigned long status;
78 unsigned long saved_psw;
79 unsigned long kprobe_saved_imask;
80 unsigned long kprobe_saved_ctl[3];
81};
82
83/* per-cpu kprobe control block */
84struct kprobe_ctlblk {
85 unsigned long kprobe_status;
86 unsigned long kprobe_saved_imask;
87 unsigned long kprobe_saved_ctl[3];
88 struct pt_regs jprobe_saved_regs;
89 unsigned long jprobe_saved_r14;
90 unsigned long jprobe_saved_r15;
91 struct prev_kprobe prev_kprobe;
92 kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
93};
94
95void arch_remove_kprobe(struct kprobe *p);
96void kretprobe_trampoline(void);
97int is_prohibited_opcode(kprobe_opcode_t *instruction);
98void get_instruction_type(struct arch_specific_insn *ainsn);
99
100#define flush_insn_slot(p) do { } while (0)
101
102#endif /* _ASM_S390_KPROBES_H */
103
104#ifdef CONFIG_KPROBES
105
106extern int kprobe_exceptions_notify(struct notifier_block *self,
107 unsigned long val, void *data);
108#else /* !CONFIG_KPROBES */
109static inline int kprobe_exceptions_notify(struct notifier_block *self,
110 unsigned long val, void *data)
111{
112 return 0;
113}
114#endif
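
A hedged sketch of how this header is consumed through the generic kprobes API; the probe address is deliberately left unresolved because picking a symbol is outside the scope of this patch:

/* Illustration only: minimal kprobe client using the generic API. */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_DEBUG "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe example_kp = {
	.pre_handler = example_pre_handler,
	/* .addr must be set to the probed instruction before registering;
	 * it is left unset here because this is only a sketch. */
};

static int __init example_kprobe_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_kprobe_exit(void)
{
	unregister_kprobe(&example_kp);
}
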
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 596c8b172104..18695d10dedf 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -35,6 +35,7 @@
35#define __LC_IO_NEW_PSW 0x01f0 35#define __LC_IO_NEW_PSW 0x01f0
36#endif /* !__s390x__ */ 36#endif /* !__s390x__ */
37 37
38#define __LC_IPL_PARMBLOCK_PTR 0x014
38#define __LC_EXT_PARAMS 0x080 39#define __LC_EXT_PARAMS 0x080
39#define __LC_CPU_ADDRESS 0x084 40#define __LC_CPU_ADDRESS 0x084
40#define __LC_EXT_INT_CODE 0x086 41#define __LC_EXT_INT_CODE 0x086
@@ -47,6 +48,7 @@
47#define __LC_PER_ATMID 0x096 48#define __LC_PER_ATMID 0x096
48#define __LC_PER_ADDRESS 0x098 49#define __LC_PER_ADDRESS 0x098
49#define __LC_PER_ACCESS_ID 0x0A1 50#define __LC_PER_ACCESS_ID 0x0A1
51#define __LC_AR_MODE_ID 0x0A3
50 52
51#define __LC_SUBCHANNEL_ID 0x0B8 53#define __LC_SUBCHANNEL_ID 0x0B8
52#define __LC_SUBCHANNEL_NR 0x0BA 54#define __LC_SUBCHANNEL_NR 0x0BA
@@ -106,18 +108,28 @@
106#define __LC_INT_CLOCK 0xDE8 108#define __LC_INT_CLOCK 0xDE8
107#endif /* __s390x__ */ 109#endif /* __s390x__ */
108 110
109#define __LC_PANIC_MAGIC 0xE00
110 111
112#define __LC_PANIC_MAGIC 0xE00
111#ifndef __s390x__ 113#ifndef __s390x__
112#define __LC_PFAULT_INTPARM 0x080 114#define __LC_PFAULT_INTPARM 0x080
113#define __LC_CPU_TIMER_SAVE_AREA 0x0D8 115#define __LC_CPU_TIMER_SAVE_AREA 0x0D8
116#define __LC_CLOCK_COMP_SAVE_AREA 0x0E0
117#define __LC_PSW_SAVE_AREA 0x100
118#define __LC_PREFIX_SAVE_AREA 0x108
114#define __LC_AREGS_SAVE_AREA 0x120 119#define __LC_AREGS_SAVE_AREA 0x120
120#define __LC_FPREGS_SAVE_AREA 0x160
115#define __LC_GPREGS_SAVE_AREA 0x180 121#define __LC_GPREGS_SAVE_AREA 0x180
116#define __LC_CREGS_SAVE_AREA 0x1C0 122#define __LC_CREGS_SAVE_AREA 0x1C0
117#else /* __s390x__ */ 123#else /* __s390x__ */
118#define __LC_PFAULT_INTPARM 0x11B8 124#define __LC_PFAULT_INTPARM 0x11B8
125#define __LC_FPREGS_SAVE_AREA 0x1200
119#define __LC_GPREGS_SAVE_AREA 0x1280 126#define __LC_GPREGS_SAVE_AREA 0x1280
127#define __LC_PSW_SAVE_AREA 0x1300
128#define __LC_PREFIX_SAVE_AREA 0x1318
129#define __LC_FP_CREG_SAVE_AREA 0x131C
130#define __LC_TODREG_SAVE_AREA 0x1324
120#define __LC_CPU_TIMER_SAVE_AREA 0x1328 131#define __LC_CPU_TIMER_SAVE_AREA 0x1328
132#define __LC_CLOCK_COMP_SAVE_AREA 0x1331
121#define __LC_AREGS_SAVE_AREA 0x1340 133#define __LC_AREGS_SAVE_AREA 0x1340
122#define __LC_CREGS_SAVE_AREA 0x1380 134#define __LC_CREGS_SAVE_AREA 0x1380
123#endif /* __s390x__ */ 135#endif /* __s390x__ */
diff --git a/include/asm-s390/monwriter.h b/include/asm-s390/monwriter.h
new file mode 100644
index 000000000000..f0cbf96c52e6
--- /dev/null
+++ b/include/asm-s390/monwriter.h
@@ -0,0 +1,33 @@
1/*
2 * include/asm-s390/monwriter.h
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Character device driver for writing z/VM APPLDATA monitor records
6 * Version 1.0
7 * Author(s): Melissa Howland <melissah@us.ibm.com>
8 *
9 */
10
11#ifndef _ASM_390_MONWRITER_H
12#define _ASM_390_MONWRITER_H
13
14/* mon_function values */
15#define MONWRITE_START_INTERVAL 0x00 /* start interval recording */
16#define MONWRITE_STOP_INTERVAL 0x01 /* stop interval or config recording */
17#define MONWRITE_GEN_EVENT 0x02 /* generate event record */
18#define MONWRITE_START_CONFIG 0x03 /* start configuration recording */
19
20/* the header the app uses in its write() data */
21struct monwrite_hdr {
22 unsigned char mon_function;
23 unsigned short applid;
24 unsigned char record_num;
25 unsigned short version;
26 unsigned short release;
27 unsigned short mod_level;
28 unsigned short datalen;
29 unsigned char hdrlen;
30
31} __attribute__((packed));
32
33#endif /* _ASM_390_MONWRITER_H */
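
A hedged user-space sketch of the write() protocol this header defines: each record is one monwrite_hdr immediately followed by datalen bytes of payload. The device node name and the applid are assumptions made up for the example:

/* Illustration only: send one event record to the monitor-writer device. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <asm/monwriter.h>

static int send_event_record(const void *data, unsigned short datalen)
{
	char buf[sizeof(struct monwrite_hdr) + 4096];
	struct monwrite_hdr hdr;
	ssize_t n;
	int fd;

	if (datalen > 4096)
		return -1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.mon_function = MONWRITE_GEN_EVENT;
	hdr.applid = 0x1234;			/* invented application id */
	hdr.record_num = 1;
	hdr.datalen = datalen;
	hdr.hdrlen = sizeof(hdr);

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), data, datalen);

	fd = open("/dev/monwriter", O_WRONLY);	/* device name is an assumption */
	if (fd < 0)
		return -1;
	n = write(fd, buf, sizeof(hdr) + datalen);
	close(fd);
	return n < 0 ? -1 : 0;
}
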
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index a78e853e0dd5..803bc7064418 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -22,6 +22,16 @@
22extern void diag10(unsigned long addr); 22extern void diag10(unsigned long addr);
23 23
24/* 24/*
25 * Page allocation orders.
26 */
27#ifndef __s390x__
28# define PGD_ALLOC_ORDER 1
29#else /* __s390x__ */
30# define PMD_ALLOC_ORDER 2
31# define PGD_ALLOC_ORDER 2
32#endif /* __s390x__ */
33
34/*
25 * Allocate and free page tables. The xxx_kernel() versions are 35 * Allocate and free page tables. The xxx_kernel() versions are
26 * used to allocate a kernel page table - this turns on ASN bits 36 * used to allocate a kernel page table - this turns on ASN bits
27 * if any. 37 * if any.
@@ -29,30 +39,23 @@ extern void diag10(unsigned long addr);
29 39
30static inline pgd_t *pgd_alloc(struct mm_struct *mm) 40static inline pgd_t *pgd_alloc(struct mm_struct *mm)
31{ 41{
32 pgd_t *pgd; 42 pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
33 int i; 43 int i;
34 44
45 if (!pgd)
46 return NULL;
47 for (i = 0; i < PTRS_PER_PGD; i++)
35#ifndef __s390x__ 48#ifndef __s390x__
36 pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1); 49 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
37 if (pgd != NULL) 50#else
38 for (i = 0; i < USER_PTRS_PER_PGD; i++) 51 pgd_clear(pgd + i);
39 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); 52#endif
40#else /* __s390x__ */
41 pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2);
42 if (pgd != NULL)
43 for (i = 0; i < PTRS_PER_PGD; i++)
44 pgd_clear(pgd + i);
45#endif /* __s390x__ */
46 return pgd; 53 return pgd;
47} 54}
48 55
49static inline void pgd_free(pgd_t *pgd) 56static inline void pgd_free(pgd_t *pgd)
50{ 57{
51#ifndef __s390x__ 58 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
52 free_pages((unsigned long) pgd, 1);
53#else /* __s390x__ */
54 free_pages((unsigned long) pgd, 2);
55#endif /* __s390x__ */
56} 59}
57 60
58#ifndef __s390x__ 61#ifndef __s390x__
@@ -68,20 +71,19 @@ static inline void pgd_free(pgd_t *pgd)
68#else /* __s390x__ */ 71#else /* __s390x__ */
69static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 72static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
70{ 73{
71 pmd_t *pmd; 74 pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
72 int i; 75 int i;
73 76
74 pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 2); 77 if (!pmd)
75 if (pmd != NULL) { 78 return NULL;
76 for (i=0; i < PTRS_PER_PMD; i++) 79 for (i=0; i < PTRS_PER_PMD; i++)
77 pmd_clear(pmd+i); 80 pmd_clear(pmd + i);
78 }
79 return pmd; 81 return pmd;
80} 82}
81 83
82static inline void pmd_free (pmd_t *pmd) 84static inline void pmd_free (pmd_t *pmd)
83{ 85{
84 free_pages((unsigned long) pmd, 2); 86 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
85} 87}
86 88
87#define __pmd_free_tlb(tlb,pmd) \ 89#define __pmd_free_tlb(tlb,pmd) \
@@ -123,15 +125,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
123static inline pte_t * 125static inline pte_t *
124pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) 126pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
125{ 127{
126 pte_t *pte; 128 pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
127 int i; 129 int i;
128 130
129 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); 131 if (!pte)
130 if (pte != NULL) { 132 return NULL;
131 for (i=0; i < PTRS_PER_PTE; i++) { 133 for (i=0; i < PTRS_PER_PTE; i++) {
132 pte_clear(mm, vmaddr, pte+i); 134 pte_clear(mm, vmaddr, pte + i);
133 vmaddr += PAGE_SIZE; 135 vmaddr += PAGE_SIZE;
134 }
135 } 136 }
136 return pte; 137 return pte;
137} 138}
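The pgalloc.h hunk above folds the two #ifdef branches into one code path: the allocation order moves into the new PGD_ALLOC_ORDER/PMD_ALLOC_ORDER constants and the failure check becomes an early return. The magic numbers 1 and 2 that disappear here come from the table geometry; a small standalone check of that arithmetic, assuming the usual s390 layout of 2048 hardware table entries (4 bytes each on 31 bit, 8 bytes each on 64 bit) and 4 KB pages:

/* Sketch: why PGD_ALLOC_ORDER is 1 on 31 bit and 2 on 64 bit.  The entry
 * counts and sizes are assumptions stated above, not taken from this hunk. */
#include <stdio.h>

static int bytes_to_order(unsigned long bytes, unsigned long page_size)
{
	int order = 0;

	while ((page_size << order) < bytes)
		order++;
	return order;
}

int main(void)
{
	printf("31-bit pgd: 2048 * 4 bytes -> order %d\n",
	       bytes_to_order(2048 * 4, 4096));		/* prints 1 */
	printf("64-bit pgd/pmd: 2048 * 8 bytes -> order %d\n",
	       bytes_to_order(2048 * 8, 4096));		/* prints 2 */
	return 0;
}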
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 24312387fa24..1a07028d575e 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -89,19 +89,6 @@ extern char empty_zero_page[PAGE_SIZE];
89# define PTRS_PER_PGD 2048 89# define PTRS_PER_PGD 2048
90#endif /* __s390x__ */ 90#endif /* __s390x__ */
91 91
92/*
93 * pgd entries used up by user/kernel:
94 */
95#ifndef __s390x__
96# define USER_PTRS_PER_PGD 512
97# define USER_PGD_PTRS 512
98# define KERNEL_PGD_PTRS 512
99#else /* __s390x__ */
100# define USER_PTRS_PER_PGD 2048
101# define USER_PGD_PTRS 2048
102# define KERNEL_PGD_PTRS 2048
103#endif /* __s390x__ */
104
105#define FIRST_USER_ADDRESS 0 92#define FIRST_USER_ADDRESS 0
106 93
107#define pte_ERROR(e) \ 94#define pte_ERROR(e) \
@@ -216,12 +203,14 @@ extern char empty_zero_page[PAGE_SIZE];
216#define _PAGE_RO 0x200 /* HW read-only */ 203#define _PAGE_RO 0x200 /* HW read-only */
217#define _PAGE_INVALID 0x400 /* HW invalid */ 204#define _PAGE_INVALID 0x400 /* HW invalid */
218 205
219/* Mask and four different kinds of invalid pages. */ 206/* Mask and six different types of pages. */
220#define _PAGE_INVALID_MASK 0x601 207#define _PAGE_TYPE_MASK 0x601
221#define _PAGE_INVALID_EMPTY 0x400 208#define _PAGE_TYPE_EMPTY 0x400
222#define _PAGE_INVALID_NONE 0x401 209#define _PAGE_TYPE_NONE 0x401
223#define _PAGE_INVALID_SWAP 0x600 210#define _PAGE_TYPE_SWAP 0x600
224#define _PAGE_INVALID_FILE 0x601 211#define _PAGE_TYPE_FILE 0x601
212#define _PAGE_TYPE_RO 0x200
213#define _PAGE_TYPE_RW 0x000
225 214
226#ifndef __s390x__ 215#ifndef __s390x__
227 216
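With the rename from _PAGE_INVALID_* to _PAGE_TYPE_*, the same mask now classifies all six software page types rather than only the four invalid flavours; every constant yields a distinct value under _PAGE_TYPE_MASK. A hypothetical decoder, shown only to make the encoding visible (not part of the patch):

/* Each type is unique under the 0x601 mask: 0x400 is HW invalid, 0x200 is
 * HW read-only, and 0x001 distinguishes the none/file variants. */
static const char *pte_type_name(unsigned long pteval)
{
	switch (pteval & 0x601) {	/* _PAGE_TYPE_MASK */
	case 0x000: return "rw";	/* _PAGE_TYPE_RW    */
	case 0x200: return "ro";	/* _PAGE_TYPE_RO    */
	case 0x400: return "empty";	/* _PAGE_TYPE_EMPTY */
	case 0x401: return "none";	/* _PAGE_TYPE_NONE  */
	case 0x600: return "swap";	/* _PAGE_TYPE_SWAP  */
	case 0x601: return "file";	/* _PAGE_TYPE_FILE  */
	default:    return "unknown";
	}
}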
@@ -280,15 +269,14 @@ extern char empty_zero_page[PAGE_SIZE];
280#endif /* __s390x__ */ 269#endif /* __s390x__ */
281 270
282/* 271/*
283 * No mapping available 272 * Page protection definitions.
284 */ 273 */
285#define PAGE_NONE_SHARED __pgprot(_PAGE_INVALID_NONE) 274#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
286#define PAGE_NONE_PRIVATE __pgprot(_PAGE_INVALID_NONE) 275#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
287#define PAGE_RO_SHARED __pgprot(_PAGE_RO) 276#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
288#define PAGE_RO_PRIVATE __pgprot(_PAGE_RO) 277
289#define PAGE_COPY __pgprot(_PAGE_RO) 278#define PAGE_KERNEL PAGE_RW
290#define PAGE_SHARED __pgprot(0) 279#define PAGE_COPY PAGE_RO
291#define PAGE_KERNEL __pgprot(0)
292 280
293/* 281/*
294 * The S390 can't do page protection for execute, and considers that the 282 * The S390 can't do page protection for execute, and considers that the
@@ -296,23 +284,23 @@ extern char empty_zero_page[PAGE_SIZE];
296 * the closest we can get.. 284 * the closest we can get..
297 */ 285 */
298 /*xwr*/ 286 /*xwr*/
299#define __P000 PAGE_NONE_PRIVATE 287#define __P000 PAGE_NONE
300#define __P001 PAGE_RO_PRIVATE 288#define __P001 PAGE_RO
301#define __P010 PAGE_COPY 289#define __P010 PAGE_RO
302#define __P011 PAGE_COPY 290#define __P011 PAGE_RO
303#define __P100 PAGE_RO_PRIVATE 291#define __P100 PAGE_RO
304#define __P101 PAGE_RO_PRIVATE 292#define __P101 PAGE_RO
305#define __P110 PAGE_COPY 293#define __P110 PAGE_RO
306#define __P111 PAGE_COPY 294#define __P111 PAGE_RO
307 295
308#define __S000 PAGE_NONE_SHARED 296#define __S000 PAGE_NONE
309#define __S001 PAGE_RO_SHARED 297#define __S001 PAGE_RO
310#define __S010 PAGE_SHARED 298#define __S010 PAGE_RW
311#define __S011 PAGE_SHARED 299#define __S011 PAGE_RW
312#define __S100 PAGE_RO_SHARED 300#define __S100 PAGE_RO
313#define __S101 PAGE_RO_SHARED 301#define __S101 PAGE_RO
314#define __S110 PAGE_SHARED 302#define __S110 PAGE_RW
315#define __S111 PAGE_SHARED 303#define __S111 PAGE_RW
316 304
317/* 305/*
318 * Certain architectures need to do special things when PTEs 306 * Certain architectures need to do special things when PTEs
@@ -377,18 +365,18 @@ static inline int pmd_bad(pmd_t pmd)
377 365
378static inline int pte_none(pte_t pte) 366static inline int pte_none(pte_t pte)
379{ 367{
380 return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_EMPTY; 368 return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_EMPTY;
381} 369}
382 370
383static inline int pte_present(pte_t pte) 371static inline int pte_present(pte_t pte)
384{ 372{
385 return !(pte_val(pte) & _PAGE_INVALID) || 373 return !(pte_val(pte) & _PAGE_INVALID) ||
386 (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_NONE; 374 (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_NONE;
387} 375}
388 376
389static inline int pte_file(pte_t pte) 377static inline int pte_file(pte_t pte)
390{ 378{
391 return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_FILE; 379 return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_FILE;
392} 380}
393 381
394#define pte_same(a,b) (pte_val(a) == pte_val(b)) 382#define pte_same(a,b) (pte_val(a) == pte_val(b))
@@ -461,7 +449,7 @@ static inline void pmd_clear(pmd_t * pmdp)
461 449
462static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 450static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
463{ 451{
464 pte_val(*ptep) = _PAGE_INVALID_EMPTY; 452 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
465} 453}
466 454
467/* 455/*
@@ -477,7 +465,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
477 465
478static inline pte_t pte_wrprotect(pte_t pte) 466static inline pte_t pte_wrprotect(pte_t pte)
479{ 467{
480 /* Do not clobber _PAGE_INVALID_NONE pages! */ 468 /* Do not clobber _PAGE_TYPE_NONE pages! */
481 if (!(pte_val(pte) & _PAGE_INVALID)) 469 if (!(pte_val(pte) & _PAGE_INVALID))
482 pte_val(pte) |= _PAGE_RO; 470 pte_val(pte) |= _PAGE_RO;
483 return pte; 471 return pte;
@@ -556,26 +544,30 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
556 return pte; 544 return pte;
557} 545}
558 546
559static inline pte_t 547static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
560ptep_clear_flush(struct vm_area_struct *vma,
561 unsigned long address, pte_t *ptep)
562{ 548{
563 pte_t pte = *ptep; 549 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
564#ifndef __s390x__ 550#ifndef __s390x__
565 if (!(pte_val(pte) & _PAGE_INVALID)) {
566 /* S390 has 1mb segments, we are emulating 4MB segments */ 551 /* S390 has 1mb segments, we are emulating 4MB segments */
567 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); 552 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
568 __asm__ __volatile__ ("ipte %2,%3" 553#else
569 : "=m" (*ptep) : "m" (*ptep), 554 /* ipte in zarch mode can do the math */
570 "a" (pto), "a" (address) ); 555 pte_t *pto = ptep;
556#endif
557 asm volatile ("ipte %2,%3"
558 : "=m" (*ptep) : "m" (*ptep),
559 "a" (pto), "a" (address) );
571 } 560 }
572#else /* __s390x__ */ 561 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
573 if (!(pte_val(pte) & _PAGE_INVALID)) 562}
574 __asm__ __volatile__ ("ipte %2,%3" 563
575 : "=m" (*ptep) : "m" (*ptep), 564static inline pte_t
576 "a" (ptep), "a" (address) ); 565ptep_clear_flush(struct vm_area_struct *vma,
577#endif /* __s390x__ */ 566 unsigned long address, pte_t *ptep)
578 pte_val(*ptep) = _PAGE_INVALID_EMPTY; 567{
568 pte_t pte = *ptep;
569
570 __ptep_ipte(address, ptep);
579 return pte; 571 return pte;
580} 572}
581 573
@@ -755,7 +747,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
755{ 747{
756 pte_t pte; 748 pte_t pte;
757 offset &= __SWP_OFFSET_MASK; 749 offset &= __SWP_OFFSET_MASK;
758 pte_val(pte) = _PAGE_INVALID_SWAP | ((type & 0x1f) << 2) | 750 pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
759 ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); 751 ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
760 return pte; 752 return pte;
761} 753}
@@ -778,7 +770,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
778 770
779#define pgoff_to_pte(__off) \ 771#define pgoff_to_pte(__off) \
780 ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ 772 ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
781 | _PAGE_INVALID_FILE }) 773 | _PAGE_TYPE_FILE })
782 774
783#endif /* !__ASSEMBLY__ */ 775#endif /* !__ASSEMBLY__ */
784 776
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 5b71d3731723..a3a4e5fd30d7 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -339,4 +339,21 @@ int unregister_idle_notifier(struct notifier_block *nb);
339 339
340#endif 340#endif
341 341
342/*
343 * Helper macro for exception table entries
344 */
345#ifndef __s390x__
346#define EX_TABLE(_fault,_target) \
347 ".section __ex_table,\"a\"\n" \
348 " .align 4\n" \
349 " .long " #_fault "," #_target "\n" \
350 ".previous\n"
351#else
352#define EX_TABLE(_fault,_target) \
353 ".section __ex_table,\"a\"\n" \
354 " .align 8\n" \
355 " .quad " #_fault "," #_target "\n" \
356 ".previous\n"
357#endif
358
342#endif /* __ASM_S390_PROCESSOR_H */ 359#endif /* __ASM_S390_PROCESSOR_H */
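The new EX_TABLE() macro packages the __ex_table boilerplate that the old __uaccess_fixup blocks (removed from uaccess.h further down) used to spell out by hand: it records a pair of addresses so that a fault on the first instruction resumes at the second. A schematic example of the usual pattern, not taken from the patch; the function name and the bare -14 (-EFAULT) are made up for illustration:

static inline int try_load(int *val, const int *ptr)
{
	int rc = -14;				/* assume failure (-EFAULT) */

	asm volatile(
		"0:	l	%1,%2\n"	/* the instruction that may fault */
		"	lhi	%0,0\n"		/* only reached if the load worked */
		"1:\n"
		EX_TABLE(0b,1b)			/* fault at 0: resumes at 1: */
		: "+d" (rc), "=d" (*val)
		: "Q" (*ptr));
	return rc;
}

The 4-byte/8-byte split inside the macro (.align 4/.long versus .align 8/.quad) simply matches the width of instruction addresses on the two builds.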
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 19e31979309a..f1959732b6fd 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -14,8 +14,6 @@
14 14
15#define PARMAREA 0x10400 15#define PARMAREA 0x10400
16#define COMMAND_LINE_SIZE 896 16#define COMMAND_LINE_SIZE 896
17#define RAMDISK_ORIGIN 0x800000
18#define RAMDISK_SIZE 0x800000
19#define MEMORY_CHUNKS 16 /* max 0x7fff */ 17#define MEMORY_CHUNKS 16 /* max 0x7fff */
20#define IPL_PARMBLOCK_ORIGIN 0x2000 18#define IPL_PARMBLOCK_ORIGIN 0x2000
21 19
@@ -46,10 +44,12 @@ extern unsigned long machine_flags;
46#define MACHINE_HAS_IEEE (machine_flags & 2) 44#define MACHINE_HAS_IEEE (machine_flags & 2)
47#define MACHINE_HAS_CSP (machine_flags & 8) 45#define MACHINE_HAS_CSP (machine_flags & 8)
48#define MACHINE_HAS_DIAG44 (1) 46#define MACHINE_HAS_DIAG44 (1)
47#define MACHINE_HAS_MVCOS (0)
49#else /* __s390x__ */ 48#else /* __s390x__ */
50#define MACHINE_HAS_IEEE (1) 49#define MACHINE_HAS_IEEE (1)
51#define MACHINE_HAS_CSP (1) 50#define MACHINE_HAS_CSP (1)
52#define MACHINE_HAS_DIAG44 (machine_flags & 32) 51#define MACHINE_HAS_DIAG44 (machine_flags & 32)
52#define MACHINE_HAS_MVCOS (machine_flags & 512)
53#endif /* __s390x__ */ 53#endif /* __s390x__ */
54 54
55 55
@@ -70,52 +70,76 @@ extern unsigned int console_irq;
70#define SET_CONSOLE_3215 do { console_mode = 2; } while (0) 70#define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
71#define SET_CONSOLE_3270 do { console_mode = 3; } while (0) 71#define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
72 72
73struct ipl_list_header { 73
74 u32 length; 74struct ipl_list_hdr {
75 u8 reserved[3]; 75 u32 len;
76 u8 reserved1[3];
76 u8 version; 77 u8 version;
78 u32 blk0_len;
79 u8 pbt;
80 u8 flags;
81 u16 reserved2;
77} __attribute__((packed)); 82} __attribute__((packed));
78 83
79struct ipl_block_fcp { 84struct ipl_block_fcp {
80 u32 length; 85 u8 reserved1[313-1];
81 u8 pbt; 86 u8 opt;
82 u8 reserved1[322-1]; 87 u8 reserved2[3];
88 u16 reserved3;
83 u16 devno; 89 u16 devno;
84 u8 reserved2[4]; 90 u8 reserved4[4];
85 u64 wwpn; 91 u64 wwpn;
86 u64 lun; 92 u64 lun;
87 u32 bootprog; 93 u32 bootprog;
88 u8 reserved3[12]; 94 u8 reserved5[12];
89 u64 br_lba; 95 u64 br_lba;
90 u32 scp_data_len; 96 u32 scp_data_len;
91 u8 reserved4[260]; 97 u8 reserved6[260];
92 u8 scp_data[]; 98 u8 scp_data[];
93} __attribute__((packed)); 99} __attribute__((packed));
94 100
101struct ipl_block_ccw {
102 u8 load_param[8];
103 u8 reserved1[84];
104 u8 reserved2[2];
105 u16 devno;
106 u8 vm_flags;
107 u8 reserved3[3];
108 u32 vm_parm_len;
109} __attribute__((packed));
110
95struct ipl_parameter_block { 111struct ipl_parameter_block {
112 struct ipl_list_hdr hdr;
96 union { 113 union {
97 u32 length; 114 struct ipl_block_fcp fcp;
98 struct ipl_list_header header; 115 struct ipl_block_ccw ccw;
99 } hdr; 116 } ipl_info;
100 struct ipl_block_fcp fcp;
101} __attribute__((packed)); 117} __attribute__((packed));
102 118
103#define IPL_MAX_SUPPORTED_VERSION (0) 119#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
120 sizeof(struct ipl_block_fcp))
104 121
105#define IPL_TYPE_FCP (0) 122#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
123 sizeof(struct ipl_block_ccw))
124
125#define IPL_MAX_SUPPORTED_VERSION (0)
106 126
107/* 127/*
108 * IPL validity flags and parameters as detected in head.S 128 * IPL validity flags and parameters as detected in head.S
109 */ 129 */
110extern u32 ipl_parameter_flags; 130extern u32 ipl_flags;
111extern u16 ipl_devno; 131extern u16 ipl_devno;
112 132
113#define IPL_DEVNO_VALID (ipl_parameter_flags & 1) 133void do_reipl(void);
114#define IPL_PARMBLOCK_VALID (ipl_parameter_flags & 2) 134
135enum {
136 IPL_DEVNO_VALID = 1,
137 IPL_PARMBLOCK_VALID = 2,
138};
115 139
116#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ 140#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
117 IPL_PARMBLOCK_ORIGIN) 141 IPL_PARMBLOCK_ORIGIN)
118#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.length) 142#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len)
119 143
120#else /* __ASSEMBLY__ */ 144#else /* __ASSEMBLY__ */
121 145
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 657646054c5e..9fb02e9779c9 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -104,7 +104,7 @@ smp_call_function_on(void (*func) (void *info), void *info,
104#define smp_cpu_not_running(cpu) 1 104#define smp_cpu_not_running(cpu) 1
105#define smp_get_cpu(cpu) ({ 0; }) 105#define smp_get_cpu(cpu) ({ 0; })
106#define smp_put_cpu(cpu) ({ 0; }) 106#define smp_put_cpu(cpu) ({ 0; })
107#define smp_setup_cpu_possible_map() 107#define smp_setup_cpu_possible_map() do { } while (0)
108#endif 108#endif
109 109
110#endif 110#endif
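The one-liner above is a correctness fix rather than a cleanup: a statement-like macro that expands to nothing leaves a bare ";" at its call site, which compilers flag (-Wempty-body) and which silently tolerates a missing semicolon. The do { } while (0) form keeps the no-op a single real statement. Illustration with hypothetical names:

#define NOOP_OLD()				/* expands to nothing */
#define NOOP_NEW()	do { } while (0)	/* expands to one statement */

void example(int cond)
{
	if (cond)
		NOOP_NEW();	/* one statement, semicolon required, no warning */
	if (cond)
		NOOP_OLD();	/* body becomes a bare ";" (-Wempty-body), and
				   the macro without ";" would also be accepted */
}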
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 0b7c0ca4c3d7..e2047b0c9092 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -47,7 +47,7 @@
47 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ 47 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
48 asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \ 48 asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \
49}) 49})
50#else 50#else /* __s390x__ */
51#define set_fs(x) \ 51#define set_fs(x) \
52({ \ 52({ \
53 unsigned long __pto; \ 53 unsigned long __pto; \
@@ -56,7 +56,7 @@
56 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ 56 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
57 asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \ 57 asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \
58}) 58})
59#endif 59#endif /* __s390x__ */
60 60
61#define segment_eq(a,b) ((a).ar4 == (b).ar4) 61#define segment_eq(a,b) ((a).ar4 == (b).ar4)
62 62
@@ -85,76 +85,51 @@ struct exception_table_entry
85 unsigned long insn, fixup; 85 unsigned long insn, fixup;
86}; 86};
87 87
88#ifndef __s390x__ 88struct uaccess_ops {
89#define __uaccess_fixup \ 89 size_t (*copy_from_user)(size_t, const void __user *, void *);
90 ".section .fixup,\"ax\"\n" \ 90 size_t (*copy_from_user_small)(size_t, const void __user *, void *);
91 "2: lhi %0,%4\n" \ 91 size_t (*copy_to_user)(size_t, void __user *, const void *);
92 " bras 1,3f\n" \ 92 size_t (*copy_to_user_small)(size_t, void __user *, const void *);
93 " .long 1b\n" \ 93 size_t (*copy_in_user)(size_t, void __user *, const void __user *);
94 "3: l 1,0(1)\n" \ 94 size_t (*clear_user)(size_t, void __user *);
95 " br 1\n" \ 95 size_t (*strnlen_user)(size_t, const char __user *);
96 ".previous\n" \ 96 size_t (*strncpy_from_user)(size_t, const char __user *, char *);
97 ".section __ex_table,\"a\"\n" \ 97 int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
98 " .align 4\n" \ 98 int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
99 " .long 0b,2b\n" \ 99};
100 ".previous" 100
101#define __uaccess_clobber "cc", "1" 101extern struct uaccess_ops uaccess;
102#else /* __s390x__ */ 102extern struct uaccess_ops uaccess_std;
103#define __uaccess_fixup \ 103extern struct uaccess_ops uaccess_mvcos;
104 ".section .fixup,\"ax\"\n" \ 104
105 "2: lghi %0,%4\n" \ 105static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
106 " jg 1b\n" \ 106{
107 ".previous\n" \ 107 size = uaccess.copy_to_user_small(size, ptr, x);
108 ".section __ex_table,\"a\"\n" \ 108 return size ? -EFAULT : size;
109 " .align 8\n" \ 109}
110 " .quad 0b,2b\n" \ 110
111 ".previous" 111static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
112#define __uaccess_clobber "cc" 112{
113#endif /* __s390x__ */ 113 size = uaccess.copy_from_user_small(size, ptr, x);
114 return size ? -EFAULT : size;
115}
114 116
115/* 117/*
116 * These are the main single-value transfer routines. They automatically 118 * These are the main single-value transfer routines. They automatically
117 * use the right size if we just have the right pointer type. 119 * use the right size if we just have the right pointer type.
118 */ 120 */
119#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
120#define __put_user_asm(x, ptr, err) \
121({ \
122 err = 0; \
123 asm volatile( \
124 "0: mvcs 0(%1,%2),%3,%0\n" \
125 "1:\n" \
126 __uaccess_fixup \
127 : "+&d" (err) \
128 : "d" (sizeof(*(ptr))), "a" (ptr), "Q" (x), \
129 "K" (-EFAULT) \
130 : __uaccess_clobber ); \
131})
132#else
133#define __put_user_asm(x, ptr, err) \
134({ \
135 err = 0; \
136 asm volatile( \
137 "0: mvcs 0(%1,%2),0(%3),%0\n" \
138 "1:\n" \
139 __uaccess_fixup \
140 : "+&d" (err) \
141 : "d" (sizeof(*(ptr))), "a" (ptr), "a" (&(x)), \
142 "K" (-EFAULT), "m" (x) \
143 : __uaccess_clobber ); \
144})
145#endif
146
147#define __put_user(x, ptr) \ 121#define __put_user(x, ptr) \
148({ \ 122({ \
149 __typeof__(*(ptr)) __x = (x); \ 123 __typeof__(*(ptr)) __x = (x); \
150 int __pu_err; \ 124 int __pu_err = -EFAULT; \
151 __chk_user_ptr(ptr); \ 125 __chk_user_ptr(ptr); \
152 switch (sizeof (*(ptr))) { \ 126 switch (sizeof (*(ptr))) { \
153 case 1: \ 127 case 1: \
154 case 2: \ 128 case 2: \
155 case 4: \ 129 case 4: \
156 case 8: \ 130 case 8: \
157 __put_user_asm(__x, ptr, __pu_err); \ 131 __pu_err = __put_user_fn(sizeof (*(ptr)), \
132 ptr, &__x); \
158 break; \ 133 break; \
159 default: \ 134 default: \
160 __put_user_bad(); \ 135 __put_user_bad(); \
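From this hunk on, every uaccess primitive goes through the uaccess_ops function table instead of inlined mvcp/mvcs sequences, so the same kernel image can run either the classic address-space-switching routines (uaccess_std) or the new MVCOS-based ones (uaccess_mvcos). The selection itself is not part of this header; presumably it happens once during early setup based on the MACHINE_HAS_MVCOS flag added to setup.h above, along these lines (sketch only, the real code lives under arch/s390/kernel and arch/s390/lib):

struct uaccess_ops uaccess;

static void __init setup_uaccess(void)
{
	if (MACHINE_HAS_MVCOS)
		uaccess = uaccess_mvcos;	/* copy without switching
						   address spaces */
	else
		uaccess = uaccess_std;		/* mvcp/mvcs/sacf based */
}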
@@ -172,60 +147,36 @@ struct exception_table_entry
172 147
173extern int __put_user_bad(void) __attribute__((noreturn)); 148extern int __put_user_bad(void) __attribute__((noreturn));
174 149
175#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
176#define __get_user_asm(x, ptr, err) \
177({ \
178 err = 0; \
179 asm volatile ( \
180 "0: mvcp %O1(%2,%R1),0(%3),%0\n" \
181 "1:\n" \
182 __uaccess_fixup \
183 : "+&d" (err), "=Q" (x) \
184 : "d" (sizeof(*(ptr))), "a" (ptr), \
185 "K" (-EFAULT) \
186 : __uaccess_clobber ); \
187})
188#else
189#define __get_user_asm(x, ptr, err) \
190({ \
191 err = 0; \
192 asm volatile ( \
193 "0: mvcp 0(%2,%5),0(%3),%0\n" \
194 "1:\n" \
195 __uaccess_fixup \
196 : "+&d" (err), "=m" (x) \
197 : "d" (sizeof(*(ptr))), "a" (ptr), \
198 "K" (-EFAULT), "a" (&(x)) \
199 : __uaccess_clobber ); \
200})
201#endif
202
203#define __get_user(x, ptr) \ 150#define __get_user(x, ptr) \
204({ \ 151({ \
205 int __gu_err; \ 152 int __gu_err = -EFAULT; \
206 __chk_user_ptr(ptr); \ 153 __chk_user_ptr(ptr); \
207 switch (sizeof(*(ptr))) { \ 154 switch (sizeof(*(ptr))) { \
208 case 1: { \ 155 case 1: { \
209 unsigned char __x; \ 156 unsigned char __x; \
210 __get_user_asm(__x, ptr, __gu_err); \ 157 __gu_err = __get_user_fn(sizeof (*(ptr)), \
158 ptr, &__x); \
211 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 159 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
212 break; \ 160 break; \
213 }; \ 161 }; \
214 case 2: { \ 162 case 2: { \
215 unsigned short __x; \ 163 unsigned short __x; \
216 __get_user_asm(__x, ptr, __gu_err); \ 164 __gu_err = __get_user_fn(sizeof (*(ptr)), \
165 ptr, &__x); \
217 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 166 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
218 break; \ 167 break; \
219 }; \ 168 }; \
220 case 4: { \ 169 case 4: { \
221 unsigned int __x; \ 170 unsigned int __x; \
222 __get_user_asm(__x, ptr, __gu_err); \ 171 __gu_err = __get_user_fn(sizeof (*(ptr)), \
172 ptr, &__x); \
223 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 173 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
224 break; \ 174 break; \
225 }; \ 175 }; \
226 case 8: { \ 176 case 8: { \
227 unsigned long long __x; \ 177 unsigned long long __x; \
228 __get_user_asm(__x, ptr, __gu_err); \ 178 __gu_err = __get_user_fn(sizeof (*(ptr)), \
179 ptr, &__x); \
229 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 180 (x) = *(__force __typeof__(*(ptr)) *) &__x; \
230 break; \ 181 break; \
231 }; \ 182 }; \
@@ -247,8 +198,6 @@ extern int __get_user_bad(void) __attribute__((noreturn));
247#define __put_user_unaligned __put_user 198#define __put_user_unaligned __put_user
248#define __get_user_unaligned __get_user 199#define __get_user_unaligned __get_user
249 200
250extern long __copy_to_user_asm(const void *from, long n, void __user *to);
251
252/** 201/**
253 * __copy_to_user: - Copy a block of data into user space, with less checking. 202 * __copy_to_user: - Copy a block of data into user space, with less checking.
254 * @to: Destination address, in user space. 203 * @to: Destination address, in user space.
@@ -266,7 +215,10 @@ extern long __copy_to_user_asm(const void *from, long n, void __user *to);
266static inline unsigned long 215static inline unsigned long
267__copy_to_user(void __user *to, const void *from, unsigned long n) 216__copy_to_user(void __user *to, const void *from, unsigned long n)
268{ 217{
269 return __copy_to_user_asm(from, n, to); 218 if (__builtin_constant_p(n) && (n <= 256))
219 return uaccess.copy_to_user_small(n, to, from);
220 else
221 return uaccess.copy_to_user(n, to, from);
270} 222}
271 223
272#define __copy_to_user_inatomic __copy_to_user 224#define __copy_to_user_inatomic __copy_to_user
@@ -294,8 +246,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
294 return n; 246 return n;
295} 247}
296 248
297extern long __copy_from_user_asm(void *to, long n, const void __user *from);
298
299/** 249/**
300 * __copy_from_user: - Copy a block of data from user space, with less checking. 250 * __copy_from_user: - Copy a block of data from user space, with less checking.
301 * @to: Destination address, in kernel space. 251 * @to: Destination address, in kernel space.
@@ -316,7 +266,10 @@ extern long __copy_from_user_asm(void *to, long n, const void __user *from);
316static inline unsigned long 266static inline unsigned long
317__copy_from_user(void *to, const void __user *from, unsigned long n) 267__copy_from_user(void *to, const void __user *from, unsigned long n)
318{ 268{
319 return __copy_from_user_asm(to, n, from); 269 if (__builtin_constant_p(n) && (n <= 256))
270 return uaccess.copy_from_user_small(n, from, to);
271 else
272 return uaccess.copy_from_user(n, from, to);
320} 273}
321 274
322/** 275/**
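Both copy helpers now dispatch at compile time: a constant length of at most 256 bytes, the most a single mvcp/mvcs can move, goes to the *_small backend, everything else to the looping variant. What that means for a typical caller (hypothetical struct and function, shown only to illustrate which path is taken):

struct small_req {		/* hypothetical, well under 256 bytes */
	u32 flags;
	u64 cookie;
};

static int example(void __user *uptr, const void *buf, unsigned long len)
{
	struct small_req req;

	if (copy_from_user(&req, uptr, sizeof(req)))
		return -EFAULT;			/* constant size: *_small path */
	if (copy_to_user(uptr, buf, len))
		return -EFAULT;			/* runtime size: general path */
	return 0;
}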
@@ -346,13 +299,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
346 return n; 299 return n;
347} 300}
348 301
349extern unsigned long __copy_in_user_asm(const void __user *from, long n,
350 void __user *to);
351
352static inline unsigned long 302static inline unsigned long
353__copy_in_user(void __user *to, const void __user *from, unsigned long n) 303__copy_in_user(void __user *to, const void __user *from, unsigned long n)
354{ 304{
355 return __copy_in_user_asm(from, n, to); 305 return uaccess.copy_in_user(n, to, from);
356} 306}
357 307
358static inline unsigned long 308static inline unsigned long
@@ -360,34 +310,28 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
360{ 310{
361 might_sleep(); 311 might_sleep();
362 if (__access_ok(from,n) && __access_ok(to,n)) 312 if (__access_ok(from,n) && __access_ok(to,n))
363 n = __copy_in_user_asm(from, n, to); 313 n = __copy_in_user(to, from, n);
364 return n; 314 return n;
365} 315}
366 316
367/* 317/*
368 * Copy a null terminated string from userspace. 318 * Copy a null terminated string from userspace.
369 */ 319 */
370extern long __strncpy_from_user_asm(long count, char *dst,
371 const char __user *src);
372
373static inline long 320static inline long
374strncpy_from_user(char *dst, const char __user *src, long count) 321strncpy_from_user(char *dst, const char __user *src, long count)
375{ 322{
376 long res = -EFAULT; 323 long res = -EFAULT;
377 might_sleep(); 324 might_sleep();
378 if (access_ok(VERIFY_READ, src, 1)) 325 if (access_ok(VERIFY_READ, src, 1))
379 res = __strncpy_from_user_asm(count, dst, src); 326 res = uaccess.strncpy_from_user(count, src, dst);
380 return res; 327 return res;
381} 328}
382 329
383
384extern long __strnlen_user_asm(long count, const char __user *src);
385
386static inline unsigned long 330static inline unsigned long
387strnlen_user(const char __user * src, unsigned long n) 331strnlen_user(const char __user * src, unsigned long n)
388{ 332{
389 might_sleep(); 333 might_sleep();
390 return __strnlen_user_asm(n, src); 334 return uaccess.strnlen_user(n, src);
391} 335}
392 336
393/** 337/**
@@ -410,12 +354,10 @@ strnlen_user(const char __user * src, unsigned long n)
410 * Zero Userspace 354 * Zero Userspace
411 */ 355 */
412 356
413extern long __clear_user_asm(void __user *to, long n);
414
415static inline unsigned long 357static inline unsigned long
416__clear_user(void __user *to, unsigned long n) 358__clear_user(void __user *to, unsigned long n)
417{ 359{
418 return __clear_user_asm(to, n); 360 return uaccess.clear_user(n, to);
419} 361}
420 362
421static inline unsigned long 363static inline unsigned long
@@ -423,7 +365,7 @@ clear_user(void __user *to, unsigned long n)
423{ 365{
424 might_sleep(); 366 might_sleep();
425 if (access_ok(VERIFY_WRITE, to, n)) 367 if (access_ok(VERIFY_WRITE, to, n))
426 n = __clear_user_asm(to, n); 368 n = uaccess.clear_user(n, to);
427 return n; 369 return n;
428} 370}
429 371
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index aa7a243862e1..02b942d85c37 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -25,17 +25,12 @@
25#define __NR_unlink 10 25#define __NR_unlink 10
26#define __NR_execve 11 26#define __NR_execve 11
27#define __NR_chdir 12 27#define __NR_chdir 12
28#define __NR_time 13
29#define __NR_mknod 14 28#define __NR_mknod 14
30#define __NR_chmod 15 29#define __NR_chmod 15
31#define __NR_lchown 16
32#define __NR_lseek 19 30#define __NR_lseek 19
33#define __NR_getpid 20 31#define __NR_getpid 20
34#define __NR_mount 21 32#define __NR_mount 21
35#define __NR_umount 22 33#define __NR_umount 22
36#define __NR_setuid 23
37#define __NR_getuid 24
38#define __NR_stime 25
39#define __NR_ptrace 26 34#define __NR_ptrace 26
40#define __NR_alarm 27 35#define __NR_alarm 27
41#define __NR_pause 29 36#define __NR_pause 29
@@ -51,11 +46,7 @@
51#define __NR_pipe 42 46#define __NR_pipe 42
52#define __NR_times 43 47#define __NR_times 43
53#define __NR_brk 45 48#define __NR_brk 45
54#define __NR_setgid 46
55#define __NR_getgid 47
56#define __NR_signal 48 49#define __NR_signal 48
57#define __NR_geteuid 49
58#define __NR_getegid 50
59#define __NR_acct 51 50#define __NR_acct 51
60#define __NR_umount2 52 51#define __NR_umount2 52
61#define __NR_ioctl 54 52#define __NR_ioctl 54
@@ -69,18 +60,13 @@
69#define __NR_getpgrp 65 60#define __NR_getpgrp 65
70#define __NR_setsid 66 61#define __NR_setsid 66
71#define __NR_sigaction 67 62#define __NR_sigaction 67
72#define __NR_setreuid 70
73#define __NR_setregid 71
74#define __NR_sigsuspend 72 63#define __NR_sigsuspend 72
75#define __NR_sigpending 73 64#define __NR_sigpending 73
76#define __NR_sethostname 74 65#define __NR_sethostname 74
77#define __NR_setrlimit 75 66#define __NR_setrlimit 75
78#define __NR_getrlimit 76
79#define __NR_getrusage 77 67#define __NR_getrusage 77
80#define __NR_gettimeofday 78 68#define __NR_gettimeofday 78
81#define __NR_settimeofday 79 69#define __NR_settimeofday 79
82#define __NR_getgroups 80
83#define __NR_setgroups 81
84#define __NR_symlink 83 70#define __NR_symlink 83
85#define __NR_readlink 85 71#define __NR_readlink 85
86#define __NR_uselib 86 72#define __NR_uselib 86
@@ -92,12 +78,10 @@
92#define __NR_truncate 92 78#define __NR_truncate 92
93#define __NR_ftruncate 93 79#define __NR_ftruncate 93
94#define __NR_fchmod 94 80#define __NR_fchmod 94
95#define __NR_fchown 95
96#define __NR_getpriority 96 81#define __NR_getpriority 96
97#define __NR_setpriority 97 82#define __NR_setpriority 97
98#define __NR_statfs 99 83#define __NR_statfs 99
99#define __NR_fstatfs 100 84#define __NR_fstatfs 100
100#define __NR_ioperm 101
101#define __NR_socketcall 102 85#define __NR_socketcall 102
102#define __NR_syslog 103 86#define __NR_syslog 103
103#define __NR_setitimer 104 87#define __NR_setitimer 104
@@ -131,11 +115,7 @@
131#define __NR_sysfs 135 115#define __NR_sysfs 135
132#define __NR_personality 136 116#define __NR_personality 136
133#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ 117#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
134#define __NR_setfsuid 138
135#define __NR_setfsgid 139
136#define __NR__llseek 140
137#define __NR_getdents 141 118#define __NR_getdents 141
138#define __NR__newselect 142
139#define __NR_flock 143 119#define __NR_flock 143
140#define __NR_msync 144 120#define __NR_msync 144
141#define __NR_readv 145 121#define __NR_readv 145
@@ -157,13 +137,9 @@
157#define __NR_sched_rr_get_interval 161 137#define __NR_sched_rr_get_interval 161
158#define __NR_nanosleep 162 138#define __NR_nanosleep 162
159#define __NR_mremap 163 139#define __NR_mremap 163
160#define __NR_setresuid 164
161#define __NR_getresuid 165
162#define __NR_query_module 167 140#define __NR_query_module 167
163#define __NR_poll 168 141#define __NR_poll 168
164#define __NR_nfsservctl 169 142#define __NR_nfsservctl 169
165#define __NR_setresgid 170
166#define __NR_getresgid 171
167#define __NR_prctl 172 143#define __NR_prctl 172
168#define __NR_rt_sigreturn 173 144#define __NR_rt_sigreturn 173
169#define __NR_rt_sigaction 174 145#define __NR_rt_sigaction 174
@@ -174,7 +150,6 @@
174#define __NR_rt_sigsuspend 179 150#define __NR_rt_sigsuspend 179
175#define __NR_pread64 180 151#define __NR_pread64 180
176#define __NR_pwrite64 181 152#define __NR_pwrite64 181
177#define __NR_chown 182
178#define __NR_getcwd 183 153#define __NR_getcwd 183
179#define __NR_capget 184 154#define __NR_capget 184
180#define __NR_capset 185 155#define __NR_capset 185
@@ -183,39 +158,11 @@
183#define __NR_getpmsg 188 158#define __NR_getpmsg 188
184#define __NR_putpmsg 189 159#define __NR_putpmsg 189
185#define __NR_vfork 190 160#define __NR_vfork 190
186#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
187#define __NR_mmap2 192
188#define __NR_truncate64 193
189#define __NR_ftruncate64 194
190#define __NR_stat64 195
191#define __NR_lstat64 196
192#define __NR_fstat64 197
193#define __NR_lchown32 198
194#define __NR_getuid32 199
195#define __NR_getgid32 200
196#define __NR_geteuid32 201
197#define __NR_getegid32 202
198#define __NR_setreuid32 203
199#define __NR_setregid32 204
200#define __NR_getgroups32 205
201#define __NR_setgroups32 206
202#define __NR_fchown32 207
203#define __NR_setresuid32 208
204#define __NR_getresuid32 209
205#define __NR_setresgid32 210
206#define __NR_getresgid32 211
207#define __NR_chown32 212
208#define __NR_setuid32 213
209#define __NR_setgid32 214
210#define __NR_setfsuid32 215
211#define __NR_setfsgid32 216
212#define __NR_pivot_root 217 161#define __NR_pivot_root 217
213#define __NR_mincore 218 162#define __NR_mincore 218
214#define __NR_madvise 219 163#define __NR_madvise 219
215#define __NR_getdents64 220 164#define __NR_getdents64 220
216#define __NR_fcntl64 221
217#define __NR_readahead 222 165#define __NR_readahead 222
218#define __NR_sendfile64 223
219#define __NR_setxattr 224 166#define __NR_setxattr 224
220#define __NR_lsetxattr 225 167#define __NR_lsetxattr 225
221#define __NR_fsetxattr 226 168#define __NR_fsetxattr 226
@@ -256,7 +203,6 @@
256#define __NR_clock_getres (__NR_timer_create+7) 203#define __NR_clock_getres (__NR_timer_create+7)
257#define __NR_clock_nanosleep (__NR_timer_create+8) 204#define __NR_clock_nanosleep (__NR_timer_create+8)
258/* Number 263 is reserved for vserver */ 205/* Number 263 is reserved for vserver */
259#define __NR_fadvise64_64 264
260#define __NR_statfs64 265 206#define __NR_statfs64 265
261#define __NR_fstatfs64 266 207#define __NR_fstatfs64 266
262#define __NR_remap_file_pages 267 208#define __NR_remap_file_pages 267
@@ -285,7 +231,6 @@
285#define __NR_mknodat 290 231#define __NR_mknodat 290
286#define __NR_fchownat 291 232#define __NR_fchownat 291
287#define __NR_futimesat 292 233#define __NR_futimesat 292
288#define __NR_fstatat64 293
289#define __NR_unlinkat 294 234#define __NR_unlinkat 294
290#define __NR_renameat 295 235#define __NR_renameat 295
291#define __NR_linkat 296 236#define __NR_linkat 296
@@ -310,62 +255,65 @@
310 * have a different name although they do the same (e.g. __NR_chown32 255 * have a different name although they do the same (e.g. __NR_chown32
311 * is __NR_chown on 64 bit). 256 * is __NR_chown on 64 bit).
312 */ 257 */
313#ifdef __s390x__ 258#ifndef __s390x__
314#undef __NR_time 259
315#undef __NR_lchown 260#define __NR_time 13
316#undef __NR_setuid 261#define __NR_lchown 16
317#undef __NR_getuid 262#define __NR_setuid 23
318#undef __NR_stime 263#define __NR_getuid 24
319#undef __NR_setgid 264#define __NR_stime 25
320#undef __NR_getgid 265#define __NR_setgid 46
321#undef __NR_geteuid 266#define __NR_getgid 47
322#undef __NR_getegid 267#define __NR_geteuid 49
323#undef __NR_setreuid 268#define __NR_getegid 50
324#undef __NR_setregid 269#define __NR_setreuid 70
325#undef __NR_getrlimit 270#define __NR_setregid 71
326#undef __NR_getgroups 271#define __NR_getrlimit 76
327#undef __NR_setgroups 272#define __NR_getgroups 80
328#undef __NR_fchown 273#define __NR_setgroups 81
329#undef __NR_ioperm 274#define __NR_fchown 95
330#undef __NR_setfsuid 275#define __NR_ioperm 101
331#undef __NR_setfsgid 276#define __NR_setfsuid 138
332#undef __NR__llseek 277#define __NR_setfsgid 139
333#undef __NR__newselect 278#define __NR__llseek 140
334#undef __NR_setresuid 279#define __NR__newselect 142
335#undef __NR_getresuid 280#define __NR_setresuid 164
336#undef __NR_setresgid 281#define __NR_getresuid 165
337#undef __NR_getresgid 282#define __NR_setresgid 170
338#undef __NR_chown 283#define __NR_getresgid 171
339#undef __NR_ugetrlimit 284#define __NR_chown 182
340#undef __NR_mmap2 285#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
341#undef __NR_truncate64 286#define __NR_mmap2 192
342#undef __NR_ftruncate64 287#define __NR_truncate64 193
343#undef __NR_stat64 288#define __NR_ftruncate64 194
344#undef __NR_lstat64 289#define __NR_stat64 195
345#undef __NR_fstat64 290#define __NR_lstat64 196
346#undef __NR_lchown32 291#define __NR_fstat64 197
347#undef __NR_getuid32 292#define __NR_lchown32 198
348#undef __NR_getgid32 293#define __NR_getuid32 199
349#undef __NR_geteuid32 294#define __NR_getgid32 200
350#undef __NR_getegid32 295#define __NR_geteuid32 201
351#undef __NR_setreuid32 296#define __NR_getegid32 202
352#undef __NR_setregid32 297#define __NR_setreuid32 203
353#undef __NR_getgroups32 298#define __NR_setregid32 204
354#undef __NR_setgroups32 299#define __NR_getgroups32 205
355#undef __NR_fchown32 300#define __NR_setgroups32 206
356#undef __NR_setresuid32 301#define __NR_fchown32 207
357#undef __NR_getresuid32 302#define __NR_setresuid32 208
358#undef __NR_setresgid32 303#define __NR_getresuid32 209
359#undef __NR_getresgid32 304#define __NR_setresgid32 210
360#undef __NR_chown32 305#define __NR_getresgid32 211
361#undef __NR_setuid32 306#define __NR_chown32 212
362#undef __NR_setgid32 307#define __NR_setuid32 213
363#undef __NR_setfsuid32 308#define __NR_setgid32 214
364#undef __NR_setfsgid32 309#define __NR_setfsuid32 215
365#undef __NR_fcntl64 310#define __NR_setfsgid32 216
366#undef __NR_sendfile64 311#define __NR_fcntl64 221
367#undef __NR_fadvise64_64 312#define __NR_sendfile64 223
368#undef __NR_fstatat64 313#define __NR_fadvise64_64 264
314#define __NR_fstatat64 293
315
316#else
369 317
370#define __NR_select 142 318#define __NR_select 142
371#define __NR_getrlimit 191 /* SuS compliant getrlimit */ 319#define __NR_getrlimit 191 /* SuS compliant getrlimit */
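The renumbering block above is inverted rather than changed: the 31-bit-only syscall numbers are now defined only under #ifndef __s390x__ instead of being defined unconditionally and #undef-ed again for 64 bit. For anything testing the macros the outcome is identical: the name simply is not defined on 64 bit. An illustrative feature test; it assumes the 64-bit branch goes on to define the plain __NR_getuid, as the header comment above describes for names like __NR_chown:

#include <unistd.h>
#include <sys/syscall.h>

static long my_getuid(void)
{
#ifdef __NR_getuid32		/* only defined for 31-bit s390 */
	return syscall(__NR_getuid32);
#else
	return syscall(__NR_getuid);
#endif
}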
diff --git a/include/asm-s390/z90crypt.h b/include/asm-s390/z90crypt.h
deleted file mode 100644
index 31a2439b07bd..000000000000
--- a/include/asm-s390/z90crypt.h
+++ /dev/null
@@ -1,212 +0,0 @@
1/*
2 * include/asm-s390/z90crypt.h
3 *
4 * z90crypt 1.3.3 (user-visible header)
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef __ASM_S390_Z90CRYPT_H
28#define __ASM_S390_Z90CRYPT_H
29#include <linux/ioctl.h>
30
31#define z90crypt_VERSION 1
32#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
33#define z90crypt_VARIANT 3 // 3 = CEX2A support
34
35/**
36 * struct ica_rsa_modexpo
37 *
38 * Requirements:
39 * - outputdatalength is at least as large as inputdatalength.
40 * - All key parts are right justified in their fields, padded on
41 * the left with zeroes.
42 * - length(b_key) = inputdatalength
43 * - length(n_modulus) = inputdatalength
44 */
45struct ica_rsa_modexpo {
46 char __user * inputdata;
47 unsigned int inputdatalength;
48 char __user * outputdata;
49 unsigned int outputdatalength;
50 char __user * b_key;
51 char __user * n_modulus;
52};
53
54/**
55 * struct ica_rsa_modexpo_crt
56 *
57 * Requirements:
58 * - inputdatalength is even.
59 * - outputdatalength is at least as large as inputdatalength.
60 * - All key parts are right justified in their fields, padded on
61 * the left with zeroes.
62 * - length(bp_key) = inputdatalength/2 + 8
63 * - length(bq_key) = inputdatalength/2
64 * - length(np_key) = inputdatalength/2 + 8
65 * - length(nq_key) = inputdatalength/2
66 * - length(u_mult_inv) = inputdatalength/2 + 8
67 */
68struct ica_rsa_modexpo_crt {
69 char __user * inputdata;
70 unsigned int inputdatalength;
71 char __user * outputdata;
72 unsigned int outputdatalength;
73 char __user * bp_key;
74 char __user * bq_key;
75 char __user * np_prime;
76 char __user * nq_prime;
77 char __user * u_mult_inv;
78};
79
80#define Z90_IOCTL_MAGIC 'z' // NOTE: Need to allocate from linux folks
81
82/**
83 * Interface notes:
84 *
85 * The ioctl()s which are implemented (along with relevant details)
86 * are:
87 *
88 * ICARSAMODEXPO
89 * Perform an RSA operation using a Modulus-Exponent pair
90 * This takes an ica_rsa_modexpo struct as its arg.
91 *
92 * NOTE: please refer to the comments preceding this structure
93 * for the implementation details for the contents of the
94 * block
95 *
96 * ICARSACRT
97 * Perform an RSA operation using a Chinese-Remainder Theorem key
98 * This takes an ica_rsa_modexpo_crt struct as its arg.
99 *
100 * NOTE: please refer to the comments preceding this structure
101 * for the implementation details for the contents of the
102 * block
103 *
104 * Z90STAT_TOTALCOUNT
105 * Return an integer count of all device types together.
106 *
107 * Z90STAT_PCICACOUNT
108 * Return an integer count of all PCICAs.
109 *
110 * Z90STAT_PCICCCOUNT
111 * Return an integer count of all PCICCs.
112 *
113 * Z90STAT_PCIXCCMCL2COUNT
114 * Return an integer count of all MCL2 PCIXCCs.
115 *
116 * Z90STAT_PCIXCCMCL3COUNT
117 * Return an integer count of all MCL3 PCIXCCs.
118 *
119 * Z90STAT_CEX2CCOUNT
120 * Return an integer count of all CEX2Cs.
121 *
122 * Z90STAT_CEX2ACOUNT
123 * Return an integer count of all CEX2As.
124 *
125 * Z90STAT_REQUESTQ_COUNT
126 * Return an integer count of the number of entries waiting to be
127 * sent to a device.
128 *
129 * Z90STAT_PENDINGQ_COUNT
130 * Return an integer count of the number of entries sent to a
131 * device awaiting the reply.
132 *
133 * Z90STAT_TOTALOPEN_COUNT
134 * Return an integer count of the number of open file handles.
135 *
136 * Z90STAT_DOMAIN_INDEX
137 * Return the integer value of the Cryptographic Domain.
138 *
139 * Z90STAT_STATUS_MASK
140 * Return an 64 element array of unsigned chars for the status of
141 * all devices.
142 * 0x01: PCICA
143 * 0x02: PCICC
144 * 0x03: PCIXCC_MCL2
145 * 0x04: PCIXCC_MCL3
146 * 0x05: CEX2C
147 * 0x06: CEX2A
148 * 0x0d: device is disabled via the proc filesystem
149 *
150 * Z90STAT_QDEPTH_MASK
151 * Return an 64 element array of unsigned chars for the queue
152 * depth of all devices.
153 *
154 * Z90STAT_PERDEV_REQCNT
155 * Return an 64 element array of unsigned integers for the number
156 * of successfully completed requests per device since the device
157 * was detected and made available.
158 *
159 * ICAZ90STATUS (deprecated)
160 * Return some device driver status in a ica_z90_status struct
161 * This takes an ica_z90_status struct as its arg.
162 *
163 * NOTE: this ioctl() is deprecated, and has been replaced with
164 * single ioctl()s for each type of status being requested
165 *
166 * Z90STAT_PCIXCCCOUNT (deprecated)
167 * Return an integer count of all PCIXCCs (MCL2 + MCL3).
168 * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
169 * MCL2 PCIXCCs.
170 *
171 * Z90QUIESCE (not recommended)
172 * Quiesce the driver. This is intended to stop all new
173 * requests from being processed. Its use is NOT recommended,
174 * except in circumstances where there is no other way to stop
175 * callers from accessing the driver. Its original use was to
176 * allow the driver to be "drained" of work in preparation for
177 * a system shutdown.
178 *
179 * NOTE: once issued, this ban on new work cannot be undone
180 * except by unloading and reloading the driver.
181 */
182
183/**
184 * Supported ioctl calls
185 */
186#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x05, 0)
187#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x06, 0)
188
189/* DEPRECATED status calls (bound for removal at some point) */
190#define ICAZ90STATUS _IOR(Z90_IOCTL_MAGIC, 0x10, struct ica_z90_status)
191#define Z90STAT_PCIXCCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x43, int)
192
193/* unrelated to ICA callers */
194#define Z90QUIESCE _IO(Z90_IOCTL_MAGIC, 0x11)
195
196/* New status calls */
197#define Z90STAT_TOTALCOUNT _IOR(Z90_IOCTL_MAGIC, 0x40, int)
198#define Z90STAT_PCICACOUNT _IOR(Z90_IOCTL_MAGIC, 0x41, int)
199#define Z90STAT_PCICCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x42, int)
200#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
201#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
202#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
203#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int)
204#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
205#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
206#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
207#define Z90STAT_DOMAIN_INDEX _IOR(Z90_IOCTL_MAGIC, 0x47, int)
208#define Z90STAT_STATUS_MASK _IOR(Z90_IOCTL_MAGIC, 0x48, char[64])
209#define Z90STAT_QDEPTH_MASK _IOR(Z90_IOCTL_MAGIC, 0x49, char[64])
210#define Z90STAT_PERDEV_REQCNT _IOR(Z90_IOCTL_MAGIC, 0x4a, int[64])
211
212#endif /* __ASM_S390_Z90CRYPT_H */
diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h
new file mode 100644
index 000000000000..7244c68464f2
--- /dev/null
+++ b/include/asm-s390/zcrypt.h
@@ -0,0 +1,285 @@
1/*
2 * include/asm-s390/zcrypt.h
3 *
4 * zcrypt 2.1.0 (user-visible header)
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef __ASM_S390_ZCRYPT_H
28#define __ASM_S390_ZCRYPT_H
29
30#define ZCRYPT_VERSION 2
31#define ZCRYPT_RELEASE 1
32#define ZCRYPT_VARIANT 0
33
34#include <linux/ioctl.h>
35#include <linux/compiler.h>
36
37/**
38 * struct ica_rsa_modexpo
39 *
40 * Requirements:
41 * - outputdatalength is at least as large as inputdatalength.
42 * - All key parts are right justified in their fields, padded on
43 * the left with zeroes.
44 * - length(b_key) = inputdatalength
45 * - length(n_modulus) = inputdatalength
46 */
47struct ica_rsa_modexpo {
48 char __user * inputdata;
49 unsigned int inputdatalength;
50 char __user * outputdata;
51 unsigned int outputdatalength;
52 char __user * b_key;
53 char __user * n_modulus;
54};
55
56/**
57 * struct ica_rsa_modexpo_crt
58 *
59 * Requirements:
60 * - inputdatalength is even.
61 * - outputdatalength is at least as large as inputdatalength.
62 * - All key parts are right justified in their fields, padded on
63 * the left with zeroes.
64 * - length(bp_key) = inputdatalength/2 + 8
65 * - length(bq_key) = inputdatalength/2
66 * - length(np_key) = inputdatalength/2 + 8
67 * - length(nq_key) = inputdatalength/2
68 * - length(u_mult_inv) = inputdatalength/2 + 8
69 */
70struct ica_rsa_modexpo_crt {
71 char __user * inputdata;
72 unsigned int inputdatalength;
73 char __user * outputdata;
74 unsigned int outputdatalength;
75 char __user * bp_key;
76 char __user * bq_key;
77 char __user * np_prime;
78 char __user * nq_prime;
79 char __user * u_mult_inv;
80};
81
82/**
83 * CPRBX
84 * Note that all shorts and ints are big-endian.
85 * All pointer fields are 16 bytes long, and mean nothing.
86 *
87 * A request CPRB is followed by a request_parameter_block.
88 *
89 * The request (or reply) parameter block is organized thus:
90 * function code
91 * VUD block
92 * key block
93 */
94struct ica_CPRBX {
95 unsigned short cprb_len; /* CPRB length 220 */
96 unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
97 unsigned char pad_000[3]; /* Alignment pad bytes */
98 unsigned char func_id[2]; /* function id 0x5432 */
99 unsigned char cprb_flags[4]; /* Flags */
100 unsigned int req_parml; /* request parameter buffer len */
101 unsigned int req_datal; /* request data buffer */
102 unsigned int rpl_msgbl; /* reply message block length */
103 unsigned int rpld_parml; /* replied parameter block len */
104 unsigned int rpl_datal; /* reply data block len */
105 unsigned int rpld_datal; /* replied data block len */
106 unsigned int req_extbl; /* request extension block len */
107 unsigned char pad_001[4]; /* reserved */
108 unsigned int rpld_extbl; /* replied extension block len */
109 unsigned char padx000[16 - sizeof (char *)];
110 unsigned char * req_parmb; /* request parm block 'address' */
111 unsigned char padx001[16 - sizeof (char *)];
112 unsigned char * req_datab; /* request data block 'address' */
113 unsigned char padx002[16 - sizeof (char *)];
114 unsigned char * rpl_parmb; /* reply parm block 'address' */
115 unsigned char padx003[16 - sizeof (char *)];
116 unsigned char * rpl_datab; /* reply data block 'address' */
117 unsigned char padx004[16 - sizeof (char *)];
118 unsigned char * req_extb; /* request extension block 'addr'*/
119 unsigned char padx005[16 - sizeof (char *)];
120 unsigned char * rpl_extb; /* reply extension block 'addres'*/
121 unsigned short ccp_rtcode; /* server return code */
122 unsigned short ccp_rscode; /* server reason code */
123 unsigned int mac_data_len; /* Mac Data Length */
124 unsigned char logon_id[8]; /* Logon Identifier */
125 unsigned char mac_value[8]; /* Mac Value */
126 unsigned char mac_content_flgs;/* Mac content flag byte */
127 unsigned char pad_002; /* Alignment */
128 unsigned short domain; /* Domain */
129 unsigned char usage_domain[4];/* Usage domain */
130 unsigned char cntrl_domain[4];/* Control domain */
131 unsigned char S390enf_mask[4];/* S/390 enforcement mask */
132 unsigned char pad_004[36]; /* reserved */
133};
134
135/**
136 * xcRB
137 */
138struct ica_xcRB {
139 unsigned short agent_ID;
140 unsigned int user_defined;
141 unsigned short request_ID;
142 unsigned int request_control_blk_length;
143 unsigned char padding1[16 - sizeof (char *)];
144 char __user * request_control_blk_addr;
145 unsigned int request_data_length;
146 char padding2[16 - sizeof (char *)];
147 char __user * request_data_address;
148 unsigned int reply_control_blk_length;
149 char padding3[16 - sizeof (char *)];
150 char __user * reply_control_blk_addr;
151 unsigned int reply_data_length;
152 char padding4[16 - sizeof (char *)];
153 char __user * reply_data_addr;
154 unsigned short priority_window;
155 unsigned int status;
156} __attribute__((packed));
157#define AUTOSELECT ((unsigned int)0xFFFFFFFF)
158
159#define ZCRYPT_IOCTL_MAGIC 'z'
160
161/**
162 * Interface notes:
163 *
164 * The ioctl()s which are implemented (along with relevant details)
165 * are:
166 *
167 * ICARSAMODEXPO
168 * Perform an RSA operation using a Modulus-Exponent pair
169 * This takes an ica_rsa_modexpo struct as its arg.
170 *
171 * NOTE: please refer to the comments preceding this structure
172 * for the implementation details for the contents of the
173 * block
174 *
175 * ICARSACRT
176 * Perform an RSA operation using a Chinese-Remainder Theorem key
177 * This takes an ica_rsa_modexpo_crt struct as its arg.
178 *
179 * NOTE: please refer to the comments preceding this structure
180 * for the implementation details for the contents of the
181 * block
182 *
183 * Z90STAT_TOTALCOUNT
184 * Return an integer count of all device types together.
185 *
186 * Z90STAT_PCICACOUNT
187 * Return an integer count of all PCICAs.
188 *
189 * Z90STAT_PCICCCOUNT
190 * Return an integer count of all PCICCs.
191 *
192 * Z90STAT_PCIXCCMCL2COUNT
193 * Return an integer count of all MCL2 PCIXCCs.
194 *
195 * Z90STAT_PCIXCCMCL3COUNT
196 * Return an integer count of all MCL3 PCIXCCs.
197 *
198 * Z90STAT_CEX2CCOUNT
199 * Return an integer count of all CEX2Cs.
200 *
201 * Z90STAT_CEX2ACOUNT
202 * Return an integer count of all CEX2As.
203 *
204 * Z90STAT_REQUESTQ_COUNT
205 * Return an integer count of the number of entries waiting to be
206 * sent to a device.
207 *
208 * Z90STAT_PENDINGQ_COUNT
209 * Return an integer count of the number of entries sent to a
210 * device awaiting the reply.
211 *
212 * Z90STAT_TOTALOPEN_COUNT
213 * Return an integer count of the number of open file handles.
214 *
215 * Z90STAT_DOMAIN_INDEX
216 * Return the integer value of the Cryptographic Domain.
217 *
218 * Z90STAT_STATUS_MASK
219 * Return an 64 element array of unsigned chars for the status of
220 * all devices.
221 * 0x01: PCICA
222 * 0x02: PCICC
223 * 0x03: PCIXCC_MCL2
224 * 0x04: PCIXCC_MCL3
225 * 0x05: CEX2C
226 * 0x06: CEX2A
227 * 0x0d: device is disabled via the proc filesystem
228 *
229 * Z90STAT_QDEPTH_MASK
230 * Return an 64 element array of unsigned chars for the queue
231 * depth of all devices.
232 *
233 * Z90STAT_PERDEV_REQCNT
234 * Return an 64 element array of unsigned integers for the number
235 * of successfully completed requests per device since the device
236 * was detected and made available.
237 *
238 * ICAZ90STATUS (deprecated)
239 * Return some device driver status in a ica_z90_status struct
240 * This takes an ica_z90_status struct as its arg.
241 *
242 * NOTE: this ioctl() is deprecated, and has been replaced with
243 * single ioctl()s for each type of status being requested
244 *
245 * Z90STAT_PCIXCCCOUNT (deprecated)
246 * Return an integer count of all PCIXCCs (MCL2 + MCL3).
247 * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
248 * MCL2 PCIXCCs.
249 *
250 * Z90QUIESCE (not recommended)
251 * Quiesce the driver. This is intended to stop all new
252 * requests from being processed. Its use is NOT recommended,
253 * except in circumstances where there is no other way to stop
254 * callers from accessing the driver. Its original use was to
255 * allow the driver to be "drained" of work in preparation for
256 * a system shutdown.
257 *
258 * NOTE: once issued, this ban on new work cannot be undone
259 * except by unloading and reloading the driver.
260 */
261
262/**
263 * Supported ioctl calls
264 */
265#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0)
266#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
267#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
268
269/* New status calls */
270#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
271#define Z90STAT_PCICACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x41, int)
272#define Z90STAT_PCICCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x42, int)
273#define Z90STAT_PCIXCCMCL2COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4b, int)
274#define Z90STAT_PCIXCCMCL3COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4c, int)
275#define Z90STAT_CEX2CCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4d, int)
276#define Z90STAT_CEX2ACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4e, int)
277#define Z90STAT_REQUESTQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x44, int)
278#define Z90STAT_PENDINGQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x45, int)
279#define Z90STAT_TOTALOPEN_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x46, int)
280#define Z90STAT_DOMAIN_INDEX _IOR(ZCRYPT_IOCTL_MAGIC, 0x47, int)
281#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64])
282#define Z90STAT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x49, char[64])
283#define Z90STAT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4a, int[64])
284
285#endif /* __ASM_S390_ZCRYPT_H */
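The user-visible ioctl set documented above is carried over from z90crypt unchanged, with ZSECSENDCPRB as the one addition. A userspace sketch of the two basic operations; error handling is trimmed, and both the include path asm/zcrypt.h and the device node /dev/z90crypt are assumptions (the node name is inherited from the old driver):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>

int main(void)
{
	unsigned char input[128], result[128], exponent[128], modulus[128];
	struct ica_rsa_modexpo mex;
	int fd, count;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return 1;

	if (ioctl(fd, Z90STAT_TOTALCOUNT, &count) == 0)
		printf("%d crypto devices available\n", count);

	memset(&mex, 0, sizeof(mex));		/* real keys/data go here,  */
	mex.inputdata        = (char *) input;	/* right justified, padded  */
	mex.inputdatalength  = sizeof(input);	/* with zeroes on the left  */
	mex.outputdata       = (char *) result;
	mex.outputdatalength = sizeof(result);	/* >= inputdatalength       */
	mex.b_key            = (char *) exponent;
	mex.n_modulus        = (char *) modulus;
	if (ioctl(fd, ICARSAMODEXPO, &mex) == 0)
		printf("modexpo done\n");

	close(fd);
	return 0;
}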
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index f6977708585c..f7ca0b09075d 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -148,6 +148,17 @@ struct ccw_device_id {
148#define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04 148#define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04
149#define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08 149#define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08
150 150
151/* s390 AP bus devices */
152struct ap_device_id {
153 __u16 match_flags; /* which fields to match against */
154 __u8 dev_type; /* device type */
155 __u8 pad1;
156 __u32 pad2;
157 kernel_ulong_t driver_info;
158};
159
160#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
161
151 162
152#define PNP_ID_LEN 8 163#define PNP_ID_LEN 8
153#define PNP_MAX_DEVICES 8 164#define PNP_MAX_DEVICES 8
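Together with the new do_ap_entry()/"ap" table handling in file2alias.c below, this lets an AP bus driver advertise the card types it supports and have "ap:tNN" module aliases generated for automatic loading. A sketch of the driver side; the device type 0x08 is a made-up example value:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static struct ap_device_id example_ap_ids[] = {
	{
		.match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE,
		.dev_type    = 0x08,		/* hypothetical card type */
	},
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, example_ap_ids);

MODULE_DEVICE_TABLE(ap, ...) emits the __mod_ap_device_table symbol that the hunk below matches on, and each entry becomes an alias such as "ap:t08" for udev/modprobe to act on.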
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index e2de650d3dbf..de76da80443f 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -265,6 +265,14 @@ static int do_ccw_entry(const char *filename,
265 return 1; 265 return 1;
266} 266}
267 267
268/* looks like: "ap:tN" */
269static int do_ap_entry(const char *filename,
270 struct ap_device_id *id, char *alias)
271{
272 sprintf(alias, "ap:t%02X", id->dev_type);
273 return 1;
274}
275
268/* Looks like: "serio:tyNprNidNexN" */ 276/* Looks like: "serio:tyNprNidNexN" */
269static int do_serio_entry(const char *filename, 277static int do_serio_entry(const char *filename,
270 struct serio_device_id *id, char *alias) 278 struct serio_device_id *id, char *alias)
@@ -503,6 +511,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
503 do_table(symval, sym->st_size, 511 do_table(symval, sym->st_size,
504 sizeof(struct ccw_device_id), "ccw", 512 sizeof(struct ccw_device_id), "ccw",
505 do_ccw_entry, mod); 513 do_ccw_entry, mod);
514 else if (sym_is(symname, "__mod_ap_device_table"))
515 do_table(symval, sym->st_size,
516 sizeof(struct ap_device_id), "ap",
517 do_ap_entry, mod);
506 else if (sym_is(symname, "__mod_serio_device_table")) 518 else if (sym_is(symname, "__mod_serio_device_table"))
507 do_table(symval, sym->st_size, 519 do_table(symval, sym->st_size,
508 sizeof(struct serio_device_id), "serio", 520 sizeof(struct serio_device_id), "serio",