Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/Kconfig  30
-rw-r--r--  drivers/s390/block/dasd.c  8
-rw-r--r--  drivers/s390/block/dasd_devmap.c  82
-rw-r--r--  drivers/s390/block/dasd_eer.c  2
-rw-r--r--  drivers/s390/block/dasd_int.h  1
-rw-r--r--  drivers/s390/block/xpram.c  2
-rw-r--r--  drivers/s390/char/Makefile  1
-rw-r--r--  drivers/s390/char/monwriter.c  292
-rw-r--r--  drivers/s390/char/vmcp.c  2
-rw-r--r--  drivers/s390/char/vmcp.h  2
-rw-r--r--  drivers/s390/cio/chsc.c  5
-rw-r--r--  drivers/s390/cio/cio.c  95
-rw-r--r--  drivers/s390/cio/css.c  203
-rw-r--r--  drivers/s390/cio/device.c  109
-rw-r--r--  drivers/s390/cio/device_fsm.c  40
-rw-r--r--  drivers/s390/cio/device_ops.c  17
-rw-r--r--  drivers/s390/cio/device_pgid.c  81
-rw-r--r--  drivers/s390/cio/qdio.c  4
-rw-r--r--  drivers/s390/cio/qdio.h  16
-rw-r--r--  drivers/s390/crypto/Makefile  15
-rw-r--r--  drivers/s390/crypto/ap_bus.c  1221
-rw-r--r--  drivers/s390/crypto/ap_bus.h  158
-rw-r--r--  drivers/s390/crypto/z90common.h  166
-rw-r--r--  drivers/s390/crypto/z90crypt.h  71
-rw-r--r--  drivers/s390/crypto/z90hardware.c  2531
-rw-r--r--  drivers/s390/crypto/z90main.c  3379
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c  1091
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h  141
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h  350
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c  435
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.h  126
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h  133
-rw-r--r--  drivers/s390/crypto/zcrypt_mono.c  100
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c  418
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.h  117
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c  630
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.h  176
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c  951
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.h  79
-rw-r--r--  drivers/s390/s390mach.c  17
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c  84
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c  13
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c  13
-rw-r--r--  drivers/s390/scsi/zfcp_def.h  32
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c  231
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h  18
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c  299
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c  112
-rw-r--r--  drivers/s390/sysinfo.c  455
49 files changed, 7432 insertions, 7122 deletions
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index 4d36208ff8de..ae89b9b88743 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -213,17 +213,35 @@ config MONREADER
 	help
 	  Character device driver for reading z/VM monitor service records
 
+config MONWRITER
+	tristate "API for writing z/VM monitor service records"
+	default "m"
+	help
+	  Character device driver for writing z/VM monitor service records
+
 endmenu
 
 menu "Cryptographic devices"
 
-config Z90CRYPT
+config ZCRYPT
 	tristate "Support for PCI-attached cryptographic adapters"
+	select ZCRYPT_MONOLITHIC if ZCRYPT="y"
 	default "m"
 	help
 	  Select this option if you want to use a PCI-attached cryptographic
-	  adapter like the PCI Cryptographic Accelerator (PCICA) or the PCI
-	  Cryptographic Coprocessor (PCICC). This option is also available
-	  as a module called z90crypt.ko.
+	  adapter like:
+	  + PCI Cryptographic Accelerator (PCICA)
+	  + PCI Cryptographic Coprocessor (PCICC)
+	  + PCI-X Cryptographic Coprocessor (PCIXCC)
+	  + Crypto Express2 Coprocessor (CEX2C)
+	  + Crypto Express2 Accelerator (CEX2A)
+
+config ZCRYPT_MONOLITHIC
+	bool "Monolithic zcrypt module"
+	depends on ZCRYPT="m"
+	help
+	  Select this option if you want to have a single module z90crypt.ko
+	  that contains all parts of the crypto device driver (ap bus,
+	  request router and all the card drivers).
 
 endmenu
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 25c1ef6dfd44..d0647d116eaa 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -184,7 +184,7 @@ dasd_state_known_to_basic(struct dasd_device * device)
 	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
 					    8 * sizeof (long));
 	debug_register_view(device->debug_area, &debug_sprintf_view);
-	debug_set_level(device->debug_area, DBF_EMERG);
+	debug_set_level(device->debug_area, DBF_WARNING);
 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
 
 	device->state = DASD_STATE_BASIC;
@@ -893,7 +893,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
 
 	device = (struct dasd_device *) cqr->device;
 	if (device == NULL ||
-	    device != dasd_device_from_cdev(cdev) ||
+	    device != dasd_device_from_cdev_locked(cdev) ||
 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
 		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
 			cdev->dev.bus_id);
@@ -970,7 +970,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	/* first of all check for state change pending interrupt */
 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
 	if ((irb->scsw.dstat & mask) == mask) {
-		device = dasd_device_from_cdev(cdev);
+		device = dasd_device_from_cdev_locked(cdev);
 		if (!IS_ERR(device)) {
 			dasd_handle_state_change_pending(device);
 			dasd_put_device(device);
@@ -2169,7 +2169,7 @@ dasd_init(void)
 		goto failed;
 	}
 	debug_register_view(dasd_debug_area, &debug_sprintf_view);
-	debug_set_level(dasd_debug_area, DBF_EMERG);
+	debug_set_level(dasd_debug_area, DBF_WARNING);
 
 	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
 
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 9af02c79ce8a..91cf971f0652 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -258,8 +258,12 @@ dasd_parse_keyword( char *parsestring ) {
 		return residual_str;
 	}
 	if (strncmp("nopav", parsestring, length) == 0) {
-		dasd_nopav = 1;
-		MESSAGE(KERN_INFO, "%s", "disable PAV mode");
+		if (MACHINE_IS_VM)
+			MESSAGE(KERN_INFO, "%s", "'nopav' not supported on VM");
+		else {
+			dasd_nopav = 1;
+			MESSAGE(KERN_INFO, "%s", "disable PAV mode");
+		}
 		return residual_str;
 	}
 	if (strncmp("fixedbuffers", parsestring, length) == 0) {
@@ -523,17 +527,17 @@ dasd_create_device(struct ccw_device *cdev)
 {
 	struct dasd_devmap *devmap;
 	struct dasd_device *device;
+	unsigned long flags;
 	int rc;
 
 	devmap = dasd_devmap_from_cdev(cdev);
 	if (IS_ERR(devmap))
 		return (void *) devmap;
-	cdev->dev.driver_data = devmap;
 
 	device = dasd_alloc_device();
 	if (IS_ERR(device))
 		return device;
-	atomic_set(&device->ref_count, 2);
+	atomic_set(&device->ref_count, 3);
 
 	spin_lock(&dasd_devmap_lock);
 	if (!devmap->device) {
@@ -552,6 +556,11 @@ dasd_create_device(struct ccw_device *cdev)
 		dasd_free_device(device);
 		return ERR_PTR(rc);
 	}
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	cdev->dev.driver_data = device;
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
 	return device;
 }
 
@@ -569,6 +578,7 @@ dasd_delete_device(struct dasd_device *device)
 {
 	struct ccw_device *cdev;
 	struct dasd_devmap *devmap;
+	unsigned long flags;
 
 	/* First remove device pointer from devmap. */
 	devmap = dasd_find_busid(device->cdev->dev.bus_id);
@@ -582,9 +592,16 @@ dasd_delete_device(struct dasd_device *device)
 	devmap->device = NULL;
 	spin_unlock(&dasd_devmap_lock);
 
-	/* Drop ref_count by 2, one for the devmap reference and
-	 * one for the passed reference. */
-	atomic_sub(2, &device->ref_count);
+	/* Disconnect dasd_device structure from ccw_device structure. */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	device->cdev->dev.driver_data = NULL;
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/*
+	 * Drop ref_count by 3, one for the devmap reference, one for
+	 * the cdev reference and one for the passed reference.
+	 */
+	atomic_sub(3, &device->ref_count);
 
 	/* Wait for reference counter to drop to zero. */
 	wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
@@ -593,9 +610,6 @@ dasd_delete_device(struct dasd_device *device)
 	cdev = device->cdev;
 	device->cdev = NULL;
 
-	/* Disconnect dasd_devmap structure from ccw_device structure. */
-	cdev->dev.driver_data = NULL;
-
 	/* Put ccw_device structure. */
 	put_device(&cdev->dev);
 
@@ -615,21 +629,32 @@ dasd_put_device_wake(struct dasd_device *device)
 
 /*
  * Return dasd_device structure associated with cdev.
+ * This function needs to be called with the ccw device
+ * lock held. It can be used from interrupt context.
+ */
+struct dasd_device *
+dasd_device_from_cdev_locked(struct ccw_device *cdev)
+{
+	struct dasd_device *device = cdev->dev.driver_data;
+
+	if (!device)
+		return ERR_PTR(-ENODEV);
+	dasd_get_device(device);
+	return device;
+}
+
+/*
+ * Return dasd_device structure associated with cdev.
  */
 struct dasd_device *
 dasd_device_from_cdev(struct ccw_device *cdev)
 {
-	struct dasd_devmap *devmap;
 	struct dasd_device *device;
+	unsigned long flags;
 
-	device = ERR_PTR(-ENODEV);
-	spin_lock(&dasd_devmap_lock);
-	devmap = cdev->dev.driver_data;
-	if (devmap && devmap->device) {
-		device = devmap->device;
-		dasd_get_device(device);
-	}
-	spin_unlock(&dasd_devmap_lock);
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 	return device;
 }
 
@@ -730,16 +755,17 @@ static ssize_t
 dasd_discipline_show(struct device *dev, struct device_attribute *attr,
 		     char *buf)
 {
-	struct dasd_devmap *devmap;
-	char *dname;
+	struct dasd_device *device;
+	ssize_t len;
 
-	spin_lock(&dasd_devmap_lock);
-	dname = "none";
-	devmap = dev->driver_data;
-	if (devmap && devmap->device && devmap->device->discipline)
-		dname = devmap->device->discipline->name;
-	spin_unlock(&dasd_devmap_lock);
-	return snprintf(buf, PAGE_SIZE, "%s\n", dname);
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (!IS_ERR(device) && device->discipline) {
+		len = snprintf(buf, PAGE_SIZE, "%s\n",
+			       device->discipline->name);
+		dasd_put_device(device);
+	} else
+		len = snprintf(buf, PAGE_SIZE, "none\n");
+	return len;
 }
 
 static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index da65f1b032f5..e0bf30ebb215 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -678,7 +678,7 @@ int __init dasd_eer_init(void)
 	return 0;
 }
 
-void __exit dasd_eer_exit(void)
+void dasd_eer_exit(void)
 {
 	WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
 }
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 3ccf06d28ba1..9f52004f6fc2 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -534,6 +534,7 @@ int dasd_add_sysfs_files(struct ccw_device *);
 void dasd_remove_sysfs_files(struct ccw_device *);
 
 struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
+struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
 struct dasd_device *dasd_device_from_devindex(int);
 
 int dasd_parse(void);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index ca7d51f7eccc..cab2c736683a 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -453,7 +453,7 @@ static int __init xpram_init(void)
 		PRINT_WARN("No expanded memory available\n");
 		return -ENODEV;
 	}
-	xpram_pages = xpram_highest_page_index();
+	xpram_pages = xpram_highest_page_index() + 1;
 	PRINT_INFO(" %u pages expanded memory found (%lu KB).\n",
 		   xpram_pages, (unsigned long) xpram_pages*4);
 	rc = xpram_setup_sizes(xpram_pages);
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 0c0162ff6c0c..c3e97b4fc186 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
 obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
 obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
 obj-$(CONFIG_MONREADER) += monreader.o
+obj-$(CONFIG_MONWRITER) += monwriter.o
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
new file mode 100644
index 000000000000..1e3939aeb8ab
--- /dev/null
+++ b/drivers/s390/char/monwriter.c
@@ -0,0 +1,292 @@
1/*
2 * drivers/s390/char/monwriter.c
3 *
4 * Character device driver for writing z/VM *MONITOR service records.
5 *
6 * Copyright (C) IBM Corp. 2006
7 *
8 * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/errno.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/miscdevice.h>
18#include <linux/ctype.h>
19#include <linux/poll.h>
20#include <asm/uaccess.h>
21#include <asm/ebcdic.h>
22#include <asm/io.h>
23#include <asm/appldata.h>
24#include <asm/monwriter.h>
25
26#define MONWRITE_MAX_DATALEN 4024
27
28static int mon_max_bufs = 255;
29
30struct mon_buf {
31 struct list_head list;
32 struct monwrite_hdr hdr;
33 int diag_done;
34 char *data;
35};
36
37struct mon_private {
38 struct list_head list;
39 struct monwrite_hdr hdr;
40 size_t hdr_to_read;
41 size_t data_to_read;
42 struct mon_buf *current_buf;
43 int mon_buf_count;
44};
45
46/*
47 * helper functions
48 */
49
50static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
51{
52 struct appldata_product_id id;
53 int rc;
54
55 strcpy(id.prod_nr, "LNXAPPL");
56 id.prod_fn = myhdr->applid;
57 id.record_nr = myhdr->record_num;
58 id.version_nr = myhdr->version;
59 id.release_nr = myhdr->release;
60 id.mod_lvl = myhdr->mod_level;
61 rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
62 if (rc <= 0)
63 return rc;
64 if (rc == 5)
65 return -EPERM;
66 printk("DIAG X'DC' error with return code: %i\n", rc);
67 return -EINVAL;
68}
69
70static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
71 struct monwrite_hdr *monhdr)
72{
73 struct mon_buf *entry, *next;
74
75 list_for_each_entry_safe(entry, next, &monpriv->list, list)
76 if (entry->hdr.applid == monhdr->applid &&
77 entry->hdr.record_num == monhdr->record_num &&
78 entry->hdr.version == monhdr->version &&
79 entry->hdr.release == monhdr->release &&
80 entry->hdr.mod_level == monhdr->mod_level)
81 return entry;
82 return NULL;
83}
84
85static int monwrite_new_hdr(struct mon_private *monpriv)
86{
87 struct monwrite_hdr *monhdr = &monpriv->hdr;
88 struct mon_buf *monbuf;
89 int rc;
90
91 if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
92 monhdr->mon_function > MONWRITE_START_CONFIG ||
93 monhdr->hdrlen != sizeof(struct monwrite_hdr))
94 return -EINVAL;
95 monbuf = monwrite_find_hdr(monpriv, monhdr);
96 if (monbuf) {
97 if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
98 monhdr->datalen = monbuf->hdr.datalen;
99 rc = monwrite_diag(monhdr, monbuf->data,
100 APPLDATA_STOP_REC);
101 list_del(&monbuf->list);
102 monpriv->mon_buf_count--;
103 kfree(monbuf->data);
104 kfree(monbuf);
105 monbuf = NULL;
106 }
107 } else {
108 if (monpriv->mon_buf_count >= mon_max_bufs)
109 return -ENOSPC;
110 monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
111 if (!monbuf)
112 return -ENOMEM;
113 monbuf->data = kzalloc(monbuf->hdr.datalen,
114 GFP_KERNEL | GFP_DMA);
115 if (!monbuf->data) {
116 kfree(monbuf);
117 return -ENOMEM;
118 }
119 monbuf->hdr = *monhdr;
120 list_add_tail(&monbuf->list, &monpriv->list);
121 monpriv->mon_buf_count++;
122 }
123 monpriv->current_buf = monbuf;
124 return 0;
125}
126
127static int monwrite_new_data(struct mon_private *monpriv)
128{
129 struct monwrite_hdr *monhdr = &monpriv->hdr;
130 struct mon_buf *monbuf = monpriv->current_buf;
131 int rc = 0;
132
133 switch (monhdr->mon_function) {
134 case MONWRITE_START_INTERVAL:
135 if (!monbuf->diag_done) {
136 rc = monwrite_diag(monhdr, monbuf->data,
137 APPLDATA_START_INTERVAL_REC);
138 monbuf->diag_done = 1;
139 }
140 break;
141 case MONWRITE_START_CONFIG:
142 if (!monbuf->diag_done) {
143 rc = monwrite_diag(monhdr, monbuf->data,
144 APPLDATA_START_CONFIG_REC);
145 monbuf->diag_done = 1;
146 }
147 break;
148 case MONWRITE_GEN_EVENT:
149 rc = monwrite_diag(monhdr, monbuf->data,
150 APPLDATA_GEN_EVENT_REC);
151 list_del(&monpriv->current_buf->list);
152 kfree(monpriv->current_buf->data);
153 kfree(monpriv->current_buf);
154 monpriv->current_buf = NULL;
155 break;
156 default:
157 /* monhdr->mon_function is checked in monwrite_new_hdr */
158 BUG();
159 }
160 return rc;
161}
162
163/*
164 * file operations
165 */
166
167static int monwrite_open(struct inode *inode, struct file *filp)
168{
169 struct mon_private *monpriv;
170
171 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
172 if (!monpriv)
173 return -ENOMEM;
174 INIT_LIST_HEAD(&monpriv->list);
175 monpriv->hdr_to_read = sizeof(monpriv->hdr);
176 filp->private_data = monpriv;
177 return nonseekable_open(inode, filp);
178}
179
180static int monwrite_close(struct inode *inode, struct file *filp)
181{
182 struct mon_private *monpriv = filp->private_data;
183 struct mon_buf *entry, *next;
184
185 list_for_each_entry_safe(entry, next, &monpriv->list, list) {
186 if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
187 monwrite_diag(&entry->hdr, entry->data,
188 APPLDATA_STOP_REC);
189 monpriv->mon_buf_count--;
190 list_del(&entry->list);
191 kfree(entry->data);
192 kfree(entry);
193 }
194 kfree(monpriv);
195 return 0;
196}
197
198static ssize_t monwrite_write(struct file *filp, const char __user *data,
199 size_t count, loff_t *ppos)
200{
201 struct mon_private *monpriv = filp->private_data;
202 size_t len, written;
203 void *to;
204 int rc;
205
206 for (written = 0; written < count; ) {
207 if (monpriv->hdr_to_read) {
208 len = min(count - written, monpriv->hdr_to_read);
209 to = (char *) &monpriv->hdr +
210 sizeof(monpriv->hdr) - monpriv->hdr_to_read;
211 if (copy_from_user(to, data + written, len)) {
212 rc = -EFAULT;
213 goto out_error;
214 }
215 monpriv->hdr_to_read -= len;
216 written += len;
217 if (monpriv->hdr_to_read > 0)
218 continue;
219 rc = monwrite_new_hdr(monpriv);
220 if (rc)
221 goto out_error;
222 monpriv->data_to_read = monpriv->current_buf ?
223 monpriv->current_buf->hdr.datalen : 0;
224 }
225
226 if (monpriv->data_to_read) {
227 len = min(count - written, monpriv->data_to_read);
228 to = monpriv->current_buf->data +
229 monpriv->hdr.datalen - monpriv->data_to_read;
230 if (copy_from_user(to, data + written, len)) {
231 rc = -EFAULT;
232 goto out_error;
233 }
234 monpriv->data_to_read -= len;
235 written += len;
236 if (monpriv->data_to_read > 0)
237 continue;
238 rc = monwrite_new_data(monpriv);
239 if (rc)
240 goto out_error;
241 }
242 monpriv->hdr_to_read = sizeof(monpriv->hdr);
243 }
244 return written;
245
246out_error:
247 monpriv->data_to_read = 0;
248 monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
249 return rc;
250}
251
252static struct file_operations monwrite_fops = {
253 .owner = THIS_MODULE,
254 .open = &monwrite_open,
255 .release = &monwrite_close,
256 .write = &monwrite_write,
257};
258
259static struct miscdevice mon_dev = {
260 .name = "monwriter",
261 .fops = &monwrite_fops,
262 .minor = MISC_DYNAMIC_MINOR,
263};
264
265/*
266 * module init/exit
267 */
268
269static int __init mon_init(void)
270{
271 if (MACHINE_IS_VM)
272 return misc_register(&mon_dev);
273 else
274 return -ENODEV;
275}
276
277static void __exit mon_exit(void)
278{
279 WARN_ON(misc_deregister(&mon_dev) != 0);
280}
281
282module_init(mon_init);
283module_exit(mon_exit);
284
285module_param_named(max_bufs, mon_max_bufs, int, 0644);
286MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers"
287 "that can be active at one time");
288
289MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
290MODULE_DESCRIPTION("Character device driver for writing z/VM "
291 "APPLDATA monitor records.");
292MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 19762f3476aa..1678b6c757ec 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2004,2005 IBM Corporation
- * Interface implementation for communication with the v/VM control program
+ * Interface implementation for communication with the z/VM control program
  * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
  *
  *
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 87389e730465..8a5975f3dad7 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2004, 2005 IBM Corporation
- * Interface implementation for communication with the v/VM control program
+ * Interface implementation for communication with the z/VM control program
  * Version 1.0
  * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
  *
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c28444af0919..3bb4e472d73d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -256,7 +256,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	/* trigger path verification. */
 	if (sch->driver && sch->driver->verify)
 		sch->driver->verify(&sch->dev);
-	else if (sch->vpm == mask)
+	else if (sch->lpm == mask)
 		goto out_unreg;
 out_unlock:
 	spin_unlock_irq(&sch->lock);
@@ -378,6 +378,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 
 	if (chp_mask == 0) {
 		spin_unlock_irq(&sch->lock);
+		put_device(&sch->dev);
 		return 0;
 	}
 	old_lpm = sch->lpm;
@@ -392,7 +393,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 
 	spin_unlock_irq(&sch->lock);
 	put_device(&sch->dev);
-	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+	return 0;
 }
 
 
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 89320c1ad825..2e2882daefbb 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -16,11 +16,10 @@
 #include <linux/device.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-
 #include <asm/cio.h>
 #include <asm/delay.h>
 #include <asm/irq.h>
-
+#include <asm/setup.h>
 #include "airq.h"
 #include "cio.h"
 #include "css.h"
@@ -192,7 +191,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
 	sch->orb.pfch = sch->options.prefetch == 0;
 	sch->orb.spnd = sch->options.suspend;
 	sch->orb.ssic = sch->options.suspend && sch->options.inter;
-	sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
+	sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm;
 #ifdef CONFIG_64BIT
 	/*
 	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
@@ -570,10 +569,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
 	sch->opm = 0xff;
 	if (!cio_is_console(sch->schid))
 		chsc_validate_chpids(sch);
-	sch->lpm = sch->schib.pmcw.pim &
-		sch->schib.pmcw.pam &
-		sch->schib.pmcw.pom &
-		sch->opm;
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
 
 	CIO_DEBUG(KERN_INFO, 0,
 		  "Detected device %04x on subchannel 0.%x.%04X"
@@ -841,14 +837,26 @@ __clear_subchannel_easy(struct subchannel_id schid)
 	return -EBUSY;
 }
 
-extern void do_reipl(unsigned long devno);
-static int
-__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+struct sch_match_id {
+	struct subchannel_id schid;
+	struct ccw_dev_id devid;
+	int rc;
+};
+
+static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
+	void *data)
 {
 	struct schib schib;
+	struct sch_match_id *match_id = data;
 
 	if (stsch_err(schid, &schib))
 		return -ENXIO;
+	if (match_id && schib.pmcw.dnv &&
+	    (schib.pmcw.dev == match_id->devid.devno) &&
+	    (schid.ssid == match_id->devid.ssid)) {
+		match_id->schid = schid;
+		match_id->rc = 0;
+	}
 	if (!schib.pmcw.ena)
 		return 0;
 	switch(__disable_subchannel_easy(schid, &schib)) {
@@ -864,18 +872,71 @@ __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
 	return 0;
 }
 
-void
-clear_all_subchannels(void)
+static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
+					   struct subchannel_id *schid)
 {
+	struct sch_match_id match_id;
+
+	match_id.devid = *devid;
+	match_id.rc = -ENODEV;
 	local_irq_disable();
-	for_each_subchannel(__shutdown_subchannel_easy, NULL);
+	for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
+	if (match_id.rc == 0)
+		*schid = match_id.schid;
+	return match_id.rc;
 }
 
+
+void clear_all_subchannels(void)
+{
+	local_irq_disable();
+	for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
+}
+
+extern void do_reipl_asm(__u32 schid);
+
 /* Make sure all subchannels are quiet before we re-ipl an lpar. */
-void
-reipl(unsigned long devno)
+void reipl_ccw_dev(struct ccw_dev_id *devid)
 {
-	clear_all_subchannels();
+	struct subchannel_id schid;
+
+	if (clear_all_subchannels_and_match(devid, &schid))
+		panic("IPL Device not found\n");
 	cio_reset_channel_paths();
-	do_reipl(devno);
+	do_reipl_asm(*((__u32*)&schid));
+}
+
+extern struct schib ipl_schib;
+
+/*
+ * ipl_save_parameters gets called very early. It is not allowed to access
+ * anything in the bss section at all. The bss section is not cleared yet,
+ * but may contain some ipl parameters written by the firmware.
+ * These parameters (if present) are copied to 0x2000.
+ * To avoid corruption of the ipl parameters, all variables used by this
+ * function must reside on the stack or in the data section.
+ */
+void ipl_save_parameters(void)
+{
+	struct subchannel_id schid;
+	unsigned int *ipl_ptr;
+	void *src, *dst;
+
+	schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
+	if (!schid.one)
+		return;
+	if (stsch(schid, &ipl_schib))
+		return;
+	if (!ipl_schib.pmcw.dnv)
+		return;
+	ipl_devno = ipl_schib.pmcw.dev;
+	ipl_flags |= IPL_DEVNO_VALID;
+	if (!ipl_schib.pmcw.qf)
+		return;
+	ipl_flags |= IPL_PARMBLOCK_VALID;
+	ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
+	src = (void *)(unsigned long)*ipl_ptr;
+	dst = (void *)IPL_PARMBLOCK_ORIGIN;
+	memmove(dst, src, PAGE_SIZE);
+	*ipl_ptr = IPL_PARMBLOCK_ORIGIN;
 }
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 13eeea3d547f..7086a74e9871 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -182,136 +182,141 @@ get_subchannel_by_schid(struct subchannel_id schid)
182 return dev ? to_subchannel(dev) : NULL; 182 return dev ? to_subchannel(dev) : NULL;
183} 183}
184 184
185 185static inline int css_get_subchannel_status(struct subchannel *sch)
186static inline int
187css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
188{ 186{
189 struct schib schib; 187 struct schib schib;
190 int cc;
191 188
192 cc = stsch(schid, &schib); 189 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
193 if (cc)
194 return CIO_GONE;
195 if (!schib.pmcw.dnv)
196 return CIO_GONE; 190 return CIO_GONE;
197 if (sch && sch->schib.pmcw.dnv && 191 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
198 (schib.pmcw.dev != sch->schib.pmcw.dev))
199 return CIO_REVALIDATE; 192 return CIO_REVALIDATE;
200 if (sch && !sch->lpm) 193 if (!sch->lpm)
201 return CIO_NO_PATH; 194 return CIO_NO_PATH;
202 return CIO_OPER; 195 return CIO_OPER;
203} 196}
204 197
205static int 198static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
206css_evaluate_subchannel(struct subchannel_id schid, int slow)
207{ 199{
208 int event, ret, disc; 200 int event, ret, disc;
209 struct subchannel *sch;
210 unsigned long flags; 201 unsigned long flags;
202 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
211 203
212 sch = get_subchannel_by_schid(schid); 204 spin_lock_irqsave(&sch->lock, flags);
213 disc = sch ? device_is_disconnected(sch) : 0; 205 disc = device_is_disconnected(sch);
214 if (disc && slow) { 206 if (disc && slow) {
215 if (sch) 207 /* Disconnected devices are evaluated directly only.*/
216 put_device(&sch->dev); 208 spin_unlock_irqrestore(&sch->lock, flags);
217 return 0; /* Already processed. */ 209 return 0;
218 } 210 }
219 /* 211 /* No interrupt after machine check - kill pending timers. */
220 * We've got a machine check, so running I/O won't get an interrupt. 212 device_kill_pending_timer(sch);
221 * Kill any pending timers.
222 */
223 if (sch)
224 device_kill_pending_timer(sch);
225 if (!disc && !slow) { 213 if (!disc && !slow) {
226 if (sch) 214 /* Non-disconnected devices are evaluated on the slow path. */
227 put_device(&sch->dev); 215 spin_unlock_irqrestore(&sch->lock, flags);
228 return -EAGAIN; /* Will be done on the slow path. */ 216 return -EAGAIN;
229 } 217 }
230 event = css_get_subchannel_status(sch, schid); 218 event = css_get_subchannel_status(sch);
231 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", 219 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
232 schid.ssid, schid.sch_no, event, 220 sch->schid.ssid, sch->schid.sch_no, event,
233 sch?(disc?"disconnected":"normal"):"unknown", 221 disc ? "disconnected" : "normal",
234 slow?"slow":"fast"); 222 slow ? "slow" : "fast");
223 /* Analyze subchannel status. */
224 action = NONE;
235 switch (event) { 225 switch (event) {
236 case CIO_NO_PATH: 226 case CIO_NO_PATH:
237 case CIO_GONE: 227 if (disc) {
238 if (!sch) { 228 /* Check if paths have become available. */
239 /* Never used this subchannel. Ignore. */ 229 action = REPROBE;
240 ret = 0;
241 break; 230 break;
242 } 231 }
243 if (disc && (event == CIO_NO_PATH)) { 232 /* fall through */
244 /* 233 case CIO_GONE:
245 * Uargh, hack again. Because we don't get a machine 234 /* Prevent unwanted effects when opening lock. */
246 * check on configure on, our path bookkeeping can 235 cio_disable_subchannel(sch);
247 * be out of date here (it's fine while we only do 236 device_set_disconnected(sch);
248 * logical varying or get chsc machine checks). We 237 /* Ask driver what to do with device. */
249 * need to force reprobing or we might miss devices 238 action = UNREGISTER;
250 * coming operational again. It won't do harm in real 239 if (sch->driver && sch->driver->notify) {
251 * no path situations.
252 */
253 spin_lock_irqsave(&sch->lock, flags);
254 device_trigger_reprobe(sch);
255 spin_unlock_irqrestore(&sch->lock, flags); 240 spin_unlock_irqrestore(&sch->lock, flags);
256 ret = 0; 241 ret = sch->driver->notify(&sch->dev, event);
257 break; 242 spin_lock_irqsave(&sch->lock, flags);
258 } 243 if (ret)
259 if (sch->driver && sch->driver->notify && 244 action = NONE;
260 sch->driver->notify(&sch->dev, event)) {
261 cio_disable_subchannel(sch);
262 device_set_disconnected(sch);
263 ret = 0;
264 break;
265 } 245 }
266 /*
267 * Unregister subchannel.
268 * The device will be killed automatically.
269 */
270 cio_disable_subchannel(sch);
271 css_sch_device_unregister(sch);
272 /* Reset intparm to zeroes. */
273 sch->schib.pmcw.intparm = 0;
274 cio_modify(sch);
275 put_device(&sch->dev);
276 ret = 0;
277 break; 246 break;
278 case CIO_REVALIDATE: 247 case CIO_REVALIDATE:
279 /* 248 /* Device will be removed, so no notify necessary. */
280 * Revalidation machine check. Sick. 249 if (disc)
281 * We don't notify the driver since we have to throw the device 250 /* Reprobe because immediate unregister might block. */
282 * away in any case. 251 action = REPROBE;
283 */ 252 else
284 if (!disc) { 253 action = UNREGISTER_PROBE;
285 css_sch_device_unregister(sch);
286 /* Reset intparm to zeroes. */
287 sch->schib.pmcw.intparm = 0;
288 cio_modify(sch);
289 put_device(&sch->dev);
290 ret = css_probe_device(schid);
291 } else {
292 /*
293 * We can't immediately deregister the disconnected
294 * device since it might block.
295 */
296 spin_lock_irqsave(&sch->lock, flags);
297 device_trigger_reprobe(sch);
298 spin_unlock_irqrestore(&sch->lock, flags);
299 ret = 0;
300 }
301 break; 254 break;
302 case CIO_OPER: 255 case CIO_OPER:
303 if (disc) { 256 if (disc)
304 spin_lock_irqsave(&sch->lock, flags);
305 /* Get device operational again. */ 257 /* Get device operational again. */
306 device_trigger_reprobe(sch); 258 action = REPROBE;
307 spin_unlock_irqrestore(&sch->lock, flags); 259 break;
308 } 260 }
309 ret = sch ? 0 : css_probe_device(schid); 261 /* Perform action. */
262 ret = 0;
263 switch (action) {
264 case UNREGISTER:
265 case UNREGISTER_PROBE:
266 /* Unregister device (will use subchannel lock). */
267 spin_unlock_irqrestore(&sch->lock, flags);
268 css_sch_device_unregister(sch);
269 spin_lock_irqsave(&sch->lock, flags);
270
271 /* Reset intparm to zeroes. */
272 sch->schib.pmcw.intparm = 0;
273 cio_modify(sch);
274
275 /* Probe if necessary. */
276 if (action == UNREGISTER_PROBE)
277 ret = css_probe_device(sch->schid);
278 break;
279 case REPROBE:
280 device_trigger_reprobe(sch);
310 break; 281 break;
311 default: 282 default:
312 BUG(); 283 break;
313 ret = 0; 284 }
285 spin_unlock_irqrestore(&sch->lock, flags);
286
287 return ret;
288}
289
290static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
291{
292 struct schib schib;
293
294 if (!slow) {
295 /* Will be done on the slow path. */
296 return -EAGAIN;
314 } 297 }
298 if (stsch(schid, &schib) || !schib.pmcw.dnv) {
299 /* Unusable - ignore. */
300 return 0;
301 }
302 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
303 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
304
305 return css_probe_device(schid);
306}
307
308static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
309{
310 struct subchannel *sch;
311 int ret;
312
313 sch = get_subchannel_by_schid(schid);
314 if (sch) {
315 ret = css_evaluate_known_subchannel(sch, slow);
316 put_device(&sch->dev);
317 } else
318 ret = css_evaluate_new_subchannel(schid, slow);
319
315 return ret; 320 return ret;
316} 321}
317 322
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 646da5640401..688945662c15 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -52,53 +52,81 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
52 return 1; 52 return 1;
53} 53}
54 54
55/* 55/* Store modalias string delimited by prefix/suffix string into buffer with
56 * Hotplugging interface for ccw devices. 56 * specified size. Return length of resulting string (excluding trailing '\0')
57 * Heavily modeled on pci and usb hotplug. 57 * even if string doesn't fit buffer (snprintf semantics). */
58 */ 58static int snprint_alias(char *buf, size_t size, const char *prefix,
59static int 59 struct ccw_device_id *id, const char *suffix)
60ccw_uevent (struct device *dev, char **envp, int num_envp,
61 char *buffer, int buffer_size)
62{ 60{
63 struct ccw_device *cdev = to_ccwdev(dev); 61 int len;
64 int i = 0;
65 int length = 0;
66 62
67 if (!cdev) 63 len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type,
68 return -ENODEV; 64 id->cu_model);
65 if (len > size)
66 return len;
67 buf += len;
68 size -= len;
69 69
70 /* what we want to pass to /sbin/hotplug */ 70 if (id->dev_type != 0)
71 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
72 id->dev_model, suffix);
73 else
74 len += snprintf(buf, size, "dtdm%s", suffix);
71 75
72 envp[i++] = buffer; 76 return len;
73 length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X", 77}
74 cdev->id.cu_type);
75 if ((buffer_size - length <= 0) || (i >= num_envp))
76 return -ENOMEM;
77 ++length;
78 buffer += length;
79 78
79/* Set up environment variables for ccw device uevent. Return 0 on success,
80 * non-zero otherwise. */
81static int ccw_uevent(struct device *dev, char **envp, int num_envp,
82 char *buffer, int buffer_size)
83{
84 struct ccw_device *cdev = to_ccwdev(dev);
85 struct ccw_device_id *id = &(cdev->id);
86 int i = 0;
87 int len;
88
89 /* CU_TYPE= */
90 len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1;
91 if (len > buffer_size || i >= num_envp)
92 return -ENOMEM;
80 envp[i++] = buffer; 93 envp[i++] = buffer;
81 length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X", 94 buffer += len;
82 cdev->id.cu_model); 95 buffer_size -= len;
83 if ((buffer_size - length <= 0) || (i >= num_envp)) 96
97 /* CU_MODEL= */
98 len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1;
99 if (len > buffer_size || i >= num_envp)
84 return -ENOMEM; 100 return -ENOMEM;
85 ++length; 101 envp[i++] = buffer;
86 buffer += length; 102 buffer += len;
103 buffer_size -= len;
87 104
88 /* The next two can be zero, that's ok for us */ 105 /* The next two can be zero, that's ok for us */
89 envp[i++] = buffer; 106 /* DEV_TYPE= */
90 length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X", 107 len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1;
91 cdev->id.dev_type); 108 if (len > buffer_size || i >= num_envp)
92 if ((buffer_size - length <= 0) || (i >= num_envp))
93 return -ENOMEM; 109 return -ENOMEM;
94 ++length; 110 envp[i++] = buffer;
95 buffer += length; 111 buffer += len;
112 buffer_size -= len;
96 113
114 /* DEV_MODEL= */
115 len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X",
116 (unsigned char) id->dev_model) + 1;
117 if (len > buffer_size || i >= num_envp)
118 return -ENOMEM;
97 envp[i++] = buffer; 119 envp[i++] = buffer;
98 length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X", 120 buffer += len;
99 cdev->id.dev_model); 121 buffer_size -= len;
100 if ((buffer_size - length <= 0) || (i >= num_envp)) 122
123 /* MODALIAS= */
124 len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1;
125 if (len > buffer_size || i >= num_envp)
101 return -ENOMEM; 126 return -ENOMEM;
127 envp[i++] = buffer;
128 buffer += len;
129 buffer_size -= len;
102 130
103 envp[i] = NULL; 131 envp[i] = NULL;
104 132
@@ -251,16 +279,11 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 	struct ccw_device_id *id = &(cdev->id);
-	int ret;
+	int len;
 
-	ret = sprintf(buf, "ccw:t%04Xm%02X",
-		      id->cu_type, id->cu_model);
-	if (id->dev_type != 0)
-		ret += sprintf(buf + ret, "dt%04Xdm%02X\n",
-			       id->dev_type, id->dev_model);
-	else
-		ret += sprintf(buf + ret, "dtdm\n");
-	return ret;
+	len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1;
+
+	return len > PAGE_SIZE ? PAGE_SIZE : len;
 }
 
 static ssize_t
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 35e162ba6d54..dace46fc32e8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -232,10 +232,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 	 */
 	old_lpm = sch->lpm;
 	stsch(sch->schid, &sch->schib);
-	sch->lpm = sch->schib.pmcw.pim &
-		sch->schib.pmcw.pam &
-		sch->schib.pmcw.pom &
-		sch->opm;
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
 	/* Check since device may again have become not operational. */
 	if (!sch->schib.pmcw.dnv)
 		state = DEV_STATE_NOT_OPER;
@@ -267,6 +264,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 		notify = 1;
 	}
 	/* fill out sense information */
+	memset(&cdev->id, 0, sizeof(cdev->id));
 	cdev->id.cu_type = cdev->private->senseid.cu_type;
 	cdev->id.cu_model = cdev->private->senseid.cu_model;
 	cdev->id.dev_type = cdev->private->senseid.dev_type;
@@ -454,8 +452,8 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
 		return;
 	}
 	/* Start Path Group verification. */
-	sch->vpm = 0; /* Start with no path groups set. */
 	cdev->private->state = DEV_STATE_VERIFY;
+	cdev->private->flags.doverify = 0;
 	ccw_device_verify_start(cdev);
 }
 
@@ -555,7 +553,19 @@ ccw_device_nopath_notify(void *data)
 void
 ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
-	cdev->private->flags.doverify = 0;
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/* Update schib - pom may have changed. */
+	stsch(sch->schid, &sch->schib);
+	/* Update lpm with verified path mask. */
+	sch->lpm = sch->vpm;
+	/* Repeat path verification? */
+	if (cdev->private->flags.doverify) {
+		cdev->private->flags.doverify = 0;
+		ccw_device_verify_start(cdev);
+		return;
+	}
 	switch (err) {
 	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
 		cdev->private->options.pgroup = 0;
@@ -613,6 +623,7 @@ ccw_device_online(struct ccw_device *cdev)
 	if (!cdev->private->options.pgroup) {
 		/* Start initial path verification. */
 		cdev->private->state = DEV_STATE_VERIFY;
+		cdev->private->flags.doverify = 0;
 		ccw_device_verify_start(cdev);
 		return 0;
 	}
@@ -659,7 +670,6 @@ ccw_device_offline(struct ccw_device *cdev)
 	/* Are we doing path grouping? */
 	if (!cdev->private->options.pgroup) {
 		/* No, set state offline immediately. */
-		sch->vpm = 0;
 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
 		return 0;
 	}
@@ -780,6 +790,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	/* Device is idle, we can do the path verification. */
 	cdev->private->state = DEV_STATE_VERIFY;
+	cdev->private->flags.doverify = 0;
 	ccw_device_verify_start(cdev);
 }
 
@@ -1042,9 +1053,9 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 }
 
 static void
-ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
+ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
 {
-	/* When the I/O has terminated, we have to start verification. */
+	/* Start verification after current task finished. */
 	cdev->private->flags.doverify = 1;
 }
 
@@ -1110,10 +1121,7 @@ device_trigger_reprobe(struct subchannel *sch)
 	 * The pim, pam, pom values may not be accurate, but they are the best
 	 * we have before performing device selection :/
 	 */
-	sch->lpm = sch->schib.pmcw.pim &
-		sch->schib.pmcw.pam &
-		sch->schib.pmcw.pom &
-		sch->opm;
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
 	/* Re-set some bits in the pmcw that were lost. */
 	sch->schib.pmcw.isc = 3;
 	sch->schib.pmcw.csense = 1;
@@ -1237,7 +1245,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
 		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
-		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
 	},
 	[DEV_STATE_ONLINE] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
@@ -1280,7 +1288,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
 		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
 		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
-		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
+		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
 	},
 	[DEV_STATE_QUIESCE] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
@@ -1293,7 +1301,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
 		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
 		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
-		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
 	},
 	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 9e3de0bd59b5..93a897eebfff 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -96,6 +96,12 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 	ret = cio_set_options (sch, flags);
 	if (ret)
 		return ret;
+	/* Adjust requested path mask to excluded varied off paths. */
+	if (lpm) {
+		lpm &= sch->opm;
+		if (lpm == 0)
+			return -EACCES;
+	}
 	ret = cio_start_key (sch, cpa, lpm, key);
 	if (ret == 0)
 		cdev->private->intparm = intparm;
@@ -250,7 +256,7 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
 	if (!sch)
 		return 0;
 	else
-		return sch->vpm;
+		return sch->lpm;
 }
 
 static void
@@ -304,7 +310,7 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
 	sch = to_subchannel(cdev->dev.parent);
 	do {
 		ret = cio_start (sch, ccw, lpm);
-		if ((ret == -EBUSY) || (ret == -EACCES)) {
+		if (ret == -EBUSY) {
 			/* Try again later. */
 			spin_unlock_irq(&sch->lock);
 			msleep(10);
@@ -433,6 +439,13 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
 	if (!ciw || ciw->cmd == 0)
 		return -EOPNOTSUPP;
 
+	/* Adjust requested path mask to excluded varied off paths. */
+	if (lpm) {
+		lpm &= sch->opm;
+		if (lpm == 0)
+			return -EACCES;
+	}
+
 	rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
 	if (!rcd_ccw)
 		return -ENOMEM;
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 1693a102dcfe..8ca2d078848c 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -245,18 +245,17 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
 	memset(&cdev->private->irb, 0, sizeof(struct irb));
 
 	/* Try multiple times. */
-	ret = -ENODEV;
+	ret = -EACCES;
 	if (cdev->private->iretry > 0) {
 		cdev->private->iretry--;
 		ret = cio_start (sch, cdev->private->iccws,
 				 cdev->private->imask);
-		/* ret is 0, -EBUSY, -EACCES or -ENODEV */
-		if ((ret != -EACCES) && (ret != -ENODEV))
+		/* We expect an interrupt in case of success or busy
+		 * indication. */
+		if ((ret == 0) || (ret == -EBUSY))
 			return ret;
 	}
-	/* PGID command failed on this path. Switch it off. */
-	sch->lpm &= ~cdev->private->imask;
-	sch->vpm &= ~cdev->private->imask;
+	/* PGID command failed on this path. */
 	CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
 		      "0.%x.%04x, lpm %02X, became 'not operational'\n",
 		      cdev->private->devno, sch->schid.ssid,
@@ -286,18 +285,17 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
 	memset(&cdev->private->irb, 0, sizeof(struct irb));
 
 	/* Try multiple times. */
-	ret = -ENODEV;
+	ret = -EACCES;
 	if (cdev->private->iretry > 0) {
 		cdev->private->iretry--;
 		ret = cio_start (sch, cdev->private->iccws,
 				 cdev->private->imask);
-		/* ret is 0, -EBUSY, -EACCES or -ENODEV */
-		if ((ret != -EACCES) && (ret != -ENODEV))
+		/* We expect an interrupt in case of success or busy
+		 * indication. */
+		if ((ret == 0) || (ret == -EBUSY))
 			return ret;
 	}
-	/* nop command failed on this path. Switch it off. */
-	sch->lpm &= ~cdev->private->imask;
-	sch->vpm &= ~cdev->private->imask;
+	/* nop command failed on this path. */
 	CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel "
 		      "0.%x.%04x, lpm %02X, became 'not operational'\n",
 		      cdev->private->devno, sch->schid.ssid,
@@ -372,27 +370,32 @@ static void
 __ccw_device_verify_start(struct ccw_device *cdev)
 {
 	struct subchannel *sch;
-	__u8 imask, func;
+	__u8 func;
 	int ret;
 
 	sch = to_subchannel(cdev->dev.parent);
-	while (sch->vpm != sch->lpm) {
-		/* Find first unequal bit in vpm vs. lpm */
-		for (imask = 0x80; imask != 0; imask >>= 1)
-			if ((sch->vpm & imask) != (sch->lpm & imask))
-				break;
-		cdev->private->imask = imask;
+	/* Repeat for all paths. */
+	for (; cdev->private->imask; cdev->private->imask >>= 1,
+	     cdev->private->iretry = 5) {
+		if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
+			/* Path not available, try next. */
+			continue;
 		if (cdev->private->options.pgroup) {
-			func = (sch->vpm & imask) ?
-				SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH;
+			if (sch->opm & cdev->private->imask)
+				func = SPID_FUNC_ESTABLISH;
+			else
+				func = SPID_FUNC_RESIGN;
 			ret = __ccw_device_do_pgid(cdev, func);
 		} else
 			ret = __ccw_device_do_nop(cdev);
+		/* We expect an interrupt in case of success or busy
+		 * indication. */
 		if (ret == 0 || ret == -EBUSY)
 			return;
-		cdev->private->iretry = 5;
+		/* Permanent path failure, try next. */
 	}
-	ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+	/* Done with all paths. */
+	ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
 }
 
 /*
@@ -421,14 +424,14 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	else
 		ret = __ccw_device_check_nop(cdev);
 	memset(&cdev->private->irb, 0, sizeof(struct irb));
+
 	switch (ret) {
 	/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
 	case 0:
-		/* Establish or Resign Path Group done. Update vpm. */
-		if ((sch->lpm & cdev->private->imask) != 0)
-			sch->vpm |= cdev->private->imask;
-		else
-			sch->vpm &= ~cdev->private->imask;
+		/* Path verification ccw finished successfully, update lpm. */
+		sch->vpm |= sch->opm & cdev->private->imask;
+		/* Go on with next path. */
+		cdev->private->imask >>= 1;
 		cdev->private->iretry = 5;
 		__ccw_device_verify_start(cdev);
 		break;
@@ -441,6 +444,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
 			cdev->private->options.pgroup = 0;
 		else
 			cdev->private->flags.pgid_single = 1;
+		/* Retry */
+		sch->vpm = 0;
+		cdev->private->imask = 0x80;
+		cdev->private->iretry = 5;
 		/* fall through. */
 	case -EAGAIN: /* Try again. */
 		__ccw_device_verify_start(cdev);
@@ -449,8 +456,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
 		ccw_device_verify_done(cdev, -ETIME);
 		break;
 	case -EACCES: /* channel is not operational. */
-		sch->lpm &= ~cdev->private->imask;
-		sch->vpm &= ~cdev->private->imask;
+		cdev->private->imask >>= 1;
 		cdev->private->iretry = 5;
 		__ccw_device_verify_start(cdev);
 		break;
@@ -463,19 +469,17 @@ ccw_device_verify_start(struct ccw_device *cdev)
463 struct subchannel *sch = to_subchannel(cdev->dev.parent); 469 struct subchannel *sch = to_subchannel(cdev->dev.parent);
464 470
465 cdev->private->flags.pgid_single = 0; 471 cdev->private->flags.pgid_single = 0;
472 cdev->private->imask = 0x80;
466 cdev->private->iretry = 5; 473 cdev->private->iretry = 5;
467 /* 474
468 * Update sch->lpm with current values to catch paths becoming 475 /* Start with empty vpm. */
469 * available again. 476 sch->vpm = 0;
470 */ 477
478 /* Get current pam. */
471 if (stsch(sch->schid, &sch->schib)) { 479 if (stsch(sch->schid, &sch->schib)) {
472 ccw_device_verify_done(cdev, -ENODEV); 480 ccw_device_verify_done(cdev, -ENODEV);
473 return; 481 return;
474 } 482 }
475 sch->lpm = sch->schib.pmcw.pim &
476 sch->schib.pmcw.pam &
477 sch->schib.pmcw.pom &
478 sch->opm;
479 __ccw_device_verify_start(cdev); 483 __ccw_device_verify_start(cdev);
480} 484}
481 485
@@ -524,7 +528,6 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
524 switch (ret) { 528 switch (ret) {
525 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ 529 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
526 case 0: /* disband successful. */ 530 case 0: /* disband successful. */
527 sch->vpm = 0;
528 ccw_device_disband_done(cdev, ret); 531 ccw_device_disband_done(cdev, ret);
529 break; 532 break;
530 case -EOPNOTSUPP: 533 case -EOPNOTSUPP:
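The device_pgid.c changes above rework path verification: instead of deriving the work list from sch->lpm, __ccw_device_verify_start() now begins with an empty vpm, walks all eight path bits in cdev->private->imask from 0x80 downwards, skips paths that are absent from the path-available mask (pam), and lets the interrupt handler OR the operational bits into vpm as each SPID/NOP completes. A minimal stand-alone sketch of that mask walk (user-space C; the pam/opm/vpm sample values are invented for illustration):

#include <stdio.h>

/* Stand-alone model of the mask walk in __ccw_device_verify_start():
 * visit the eight path bits from 0x80 downwards, skip paths that are
 * not in the path-available mask (pam), and accumulate the verified
 * path mask (vpm) from the operational mask (opm). The masks below
 * are invented sample values, not taken from real hardware. */
int main(void)
{
	unsigned char pam = 0xe0;	/* paths reported available */
	unsigned char opm = 0xc0;	/* paths considered operational */
	unsigned char vpm = 0x00;	/* verified paths, starts empty */
	unsigned char imask;

	for (imask = 0x80; imask; imask >>= 1) {
		if (!(imask & pam))
			continue;	/* path not available, try next */
		/* a successful SPID/NOP on this path ends up here */
		vpm |= opm & imask;
	}
	printf("vpm = %02x -> %s\n", vpm,
	       vpm ? "verification done" : "-ENODEV");
	return 0;
}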
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 7c93a8798d23..cde822d8b5c8 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -115,7 +115,7 @@ qdio_min(int a,int b)
115static inline __u64 115static inline __u64
116qdio_get_micros(void) 116qdio_get_micros(void)
117{ 117{
118 return (get_clock() >> 10); /* time>>12 is microseconds */ 118 return (get_clock() >> 12); /* time>>12 is microseconds */
119} 119}
120 120
121/* 121/*
@@ -1129,7 +1129,7 @@ out:
1129 1129
1130#ifdef QDIO_USE_PROCESSING_STATE 1130#ifdef QDIO_USE_PROCESSING_STATE
1131 if (last_position>=0) 1131 if (last_position>=0)
1132 set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count); 1132 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1133#endif /* QDIO_USE_PROCESSING_STATE */ 1133#endif /* QDIO_USE_PROCESSING_STATE */
1134 1134
1135 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); 1135 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
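The first qdio.c hunk above brings the code in line with its own comment: in the s390 TOD clock format, bit 51 carries a value of one microsecond, so converting TOD units to microseconds needs a right shift by 12; the old shift by 10 reported elapsed times four times too large. A small user-space illustration with get_clock() mocked (the mock value is invented):

#include <stdio.h>

/* Illustration only: in the s390 TOD clock format bit 51 has a value
 * of one microsecond, so a right shift by 12 converts TOD units to
 * microseconds. get_clock() is a kernel primitive and is mocked here
 * with a value corresponding to one elapsed second. */
static unsigned long long get_clock_mock(void)
{
	return 4096ULL * 1000000;	/* one second in TOD units */
}

int main(void)
{
	unsigned long long tod = get_clock_mock();

	printf("old (>> 10): %llu us\n", tod >> 10);	/* 4000000: 4x too big */
	printf("new (>> 12): %llu us\n", tod >> 12);	/* 1000000: correct   */
	return 0;
}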
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ceb3ab31ee08..124569362f02 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -191,49 +191,49 @@ enum qdio_irq_states {
191#if QDIO_VERBOSE_LEVEL>8 191#if QDIO_VERBOSE_LEVEL>8
192#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) 192#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
193#else 193#else
194#define QDIO_PRINT_STUPID(x...) 194#define QDIO_PRINT_STUPID(x...) do { } while (0)
195#endif 195#endif
196 196
197#if QDIO_VERBOSE_LEVEL>7 197#if QDIO_VERBOSE_LEVEL>7
198#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) 198#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
199#else 199#else
200#define QDIO_PRINT_ALL(x...) 200#define QDIO_PRINT_ALL(x...) do { } while (0)
201#endif 201#endif
202 202
203#if QDIO_VERBOSE_LEVEL>6 203#if QDIO_VERBOSE_LEVEL>6
204#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) 204#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
205#else 205#else
206#define QDIO_PRINT_INFO(x...) 206#define QDIO_PRINT_INFO(x...) do { } while (0)
207#endif 207#endif
208 208
209#if QDIO_VERBOSE_LEVEL>5 209#if QDIO_VERBOSE_LEVEL>5
210#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) 210#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
211#else 211#else
212#define QDIO_PRINT_WARN(x...) 212#define QDIO_PRINT_WARN(x...) do { } while (0)
213#endif 213#endif
214 214
215#if QDIO_VERBOSE_LEVEL>4 215#if QDIO_VERBOSE_LEVEL>4
216#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) 216#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
217#else 217#else
218#define QDIO_PRINT_ERR(x...) 218#define QDIO_PRINT_ERR(x...) do { } while (0)
219#endif 219#endif
220 220
221#if QDIO_VERBOSE_LEVEL>3 221#if QDIO_VERBOSE_LEVEL>3
222#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) 222#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
223#else 223#else
224#define QDIO_PRINT_CRIT(x...) 224#define QDIO_PRINT_CRIT(x...) do { } while (0)
225#endif 225#endif
226 226
227#if QDIO_VERBOSE_LEVEL>2 227#if QDIO_VERBOSE_LEVEL>2
228#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) 228#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
229#else 229#else
230#define QDIO_PRINT_ALERT(x...) 230#define QDIO_PRINT_ALERT(x...) do { } while (0)
231#endif 231#endif
232 232
233#if QDIO_VERBOSE_LEVEL>1 233#if QDIO_VERBOSE_LEVEL>1
234#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) 234#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
235#else 235#else
236#define QDIO_PRINT_EMERG(x...) 236#define QDIO_PRINT_EMERG(x...) do { } while (0)
237#endif 237#endif
238 238
239#define HEXDUMP16(importance,header,ptr) \ 239#define HEXDUMP16(importance,header,ptr) \
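The qdio.h hunk above gives the compiled-out QDIO_PRINT_* macros a do { } while (0) body, the usual way to make a disabled macro behave like a single statement: call sites in unbraced if/else stay well-formed and compilers stop warning about empty bodies, while no code is generated either way. A minimal stand-alone illustration (these are not the kernel macros, just the same idiom):

#include <stdio.h>

/* With the verbose level compiled out, an empty expansion turns a
 * call site into a bare ";", which gcc flags with -Wempty-body; the
 * do { } while (0) form is a real statement and stays warning-free
 * and safe in unbraced if/else constructs. */
#define PRINT_EMPTY(x...)
#define PRINT_SAFE(x...)	do { } while (0)

int main(void)
{
	int err = 1;

	if (err)
		PRINT_EMPTY("old style\n");	/* expands to "if (err) ;" */
	if (err)
		PRINT_SAFE("new style\n");	/* proper empty statement   */
	return 0;
}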
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 15edebbead7f..f0a12d2eb780 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,5 +2,16 @@
2# S/390 crypto devices 2# S/390 crypto devices
3# 3#
4 4
5z90crypt-objs := z90main.o z90hardware.o 5ifdef CONFIG_ZCRYPT_MONOLITHIC
6obj-$(CONFIG_Z90CRYPT) += z90crypt.o 6
7z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \
8 zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o
9obj-$(CONFIG_ZCRYPT) += z90crypt.o
10
11else
12
13ap-objs := ap_bus.o
14obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
15obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
16
17endif
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
new file mode 100644
index 000000000000..6ed0985c0c91
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.c
@@ -0,0 +1,1221 @@
1/*
2 * linux/drivers/s390/crypto/ap_bus.c
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30#include <linux/interrupt.h>
31#include <linux/workqueue.h>
32#include <linux/notifier.h>
33#include <linux/kthread.h>
34#include <linux/mutex.h>
35#include <asm/s390_rdev.h>
36
37#include "ap_bus.h"
38
39/* Some prototypes. */
40static void ap_scan_bus(void *);
41static void ap_poll_all(unsigned long);
42static void ap_poll_timeout(unsigned long);
43static int ap_poll_thread_start(void);
44static void ap_poll_thread_stop(void);
45
46/**
47 * Module description.
48 */
49MODULE_AUTHOR("IBM Corporation");
50MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
51 "Copyright 2006 IBM Corporation");
52MODULE_LICENSE("GPL");
53
54/**
55 * Module parameter
56 */
57int ap_domain_index = -1; /* Adjunct Processor Domain Index */
58module_param_named(domain, ap_domain_index, int, 0000);
59MODULE_PARM_DESC(domain, "domain index for ap devices");
60EXPORT_SYMBOL(ap_domain_index);
61
62static int ap_thread_flag = 1;
63module_param_named(poll_thread, ap_thread_flag, int, 0000);
64MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on).");
65
66static struct device *ap_root_device = NULL;
67
68/**
69 * Workqueue & timer for bus rescan.
70 */
71static struct workqueue_struct *ap_work_queue;
72static struct timer_list ap_config_timer;
73static int ap_config_time = AP_CONFIG_TIME;
74static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
75
76/**
77 * Tasklet & timer for AP request polling.
78 */
79static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
80static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
81static atomic_t ap_poll_requests = ATOMIC_INIT(0);
82static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
83static struct task_struct *ap_poll_kthread = NULL;
84static DEFINE_MUTEX(ap_poll_thread_mutex);
85
86/**
87 * Test if ap instructions are available.
88 *
89 * Returns 0 if the ap instructions are installed.
90 */
91static inline int ap_instructions_available(void)
92{
93 register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
94 register unsigned long reg1 asm ("1") = -ENODEV;
95 register unsigned long reg2 asm ("2") = 0UL;
96
97 asm volatile(
98 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
99 "0: la %1,0\n"
100 "1:\n"
101 EX_TABLE(0b, 1b)
102 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
103 return reg1;
104}
105
106/**
107 * Test adjunct processor queue.
108 * @qid: the ap queue number
109 * @queue_depth: pointer to queue depth value
110 * @device_type: pointer to device type value
111 *
112 * Returns ap queue status structure.
113 */
114static inline struct ap_queue_status
115ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
116{
117 register unsigned long reg0 asm ("0") = qid;
118 register struct ap_queue_status reg1 asm ("1");
119 register unsigned long reg2 asm ("2") = 0UL;
120
121 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
122 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
123 *device_type = (int) (reg2 >> 24);
124 *queue_depth = (int) (reg2 & 0xff);
125 return reg1;
126}
127
128/**
129 * Reset adjunct processor queue.
130 * @qid: the ap queue number
131 *
132 * Returns ap queue status structure.
133 */
134static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
135{
136 register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
137 register struct ap_queue_status reg1 asm ("1");
138 register unsigned long reg2 asm ("2") = 0UL;
139
140 asm volatile(
141 ".long 0xb2af0000" /* PQAP(RAPQ) */
142 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
143 return reg1;
144}
145
146/**
147 * Send message to adjunct processor queue.
148 * @qid: the ap queue number
149 * @psmid: the program supplied message identifier
150 * @msg: the message text
151 * @length: the message length
152 *
153 * Returns ap queue status structure.
154 *
155 * Condition code 1 on NQAP can't happen because the L bit is 1.
156 *
157 * Condition code 2 on NQAP also means the send is incomplete,
158 * because a segment boundary was reached. The NQAP is repeated.
159 */
160static inline struct ap_queue_status
161__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
162{
163 typedef struct { char _[length]; } msgblock;
164 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
165 register struct ap_queue_status reg1 asm ("1");
166 register unsigned long reg2 asm ("2") = (unsigned long) msg;
167 register unsigned long reg3 asm ("3") = (unsigned long) length;
168 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
169 register unsigned long reg5 asm ("5") = (unsigned int) psmid;
170
171 asm volatile (
172 "0: .long 0xb2ad0042\n" /* DQAP */
173 " brc 2,0b"
174 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
175 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
176 : "cc" );
177 return reg1;
178}
179
180int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
181{
182 struct ap_queue_status status;
183
184 status = __ap_send(qid, psmid, msg, length);
185 switch (status.response_code) {
186 case AP_RESPONSE_NORMAL:
187 return 0;
188 case AP_RESPONSE_Q_FULL:
189 return -EBUSY;
190 default: /* Device is gone. */
191 return -ENODEV;
192 }
193}
194EXPORT_SYMBOL(ap_send);
195
196/*
197 * Receive message from adjunct processor queue.
198 * @qid: the ap queue number
199 * @psmid: pointer to program supplied message identifier
200 * @msg: the message text
201 * @length: the message length
202 *
203 * Returns ap queue status structure.
204 *
205 * Condition code 1 on DQAP means the receive has taken place
206 * but only partially. The response is incomplete, hence the
207 * DQAP is repeated.
208 *
209 * Condition code 2 on DQAP also means the receive is incomplete,
210 * this time because a segment boundary was reached. Again, the
211 * DQAP is repeated.
212 *
213 * Note that gpr2 is used by the DQAP instruction to keep track of
214 * any 'residual' length, in case the instruction gets interrupted.
215 * Hence it gets zeroed before the instruction.
216 */
217static inline struct ap_queue_status
218__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
219{
220 typedef struct { char _[length]; } msgblock;
221 register unsigned long reg0 asm("0") = qid | 0x80000000UL;
222 register struct ap_queue_status reg1 asm ("1");
223 register unsigned long reg2 asm("2") = 0UL;
224 register unsigned long reg4 asm("4") = (unsigned long) msg;
225 register unsigned long reg5 asm("5") = (unsigned long) length;
226 register unsigned long reg6 asm("6") = 0UL;
227 register unsigned long reg7 asm("7") = 0UL;
228
229
230 asm volatile(
231 "0: .long 0xb2ae0064\n"
232 " brc 6,0b\n"
233 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
234 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
235 "=m" (*(msgblock *) msg) : : "cc" );
236 *psmid = (((unsigned long long) reg6) << 32) + reg7;
237 return reg1;
238}
239
240int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
241{
242 struct ap_queue_status status;
243
244 status = __ap_recv(qid, psmid, msg, length);
245 switch (status.response_code) {
246 case AP_RESPONSE_NORMAL:
247 return 0;
248 case AP_RESPONSE_NO_PENDING_REPLY:
249 if (status.queue_empty)
250 return -ENOENT;
251 return -EBUSY;
252 default:
253 return -ENODEV;
254 }
255}
256EXPORT_SYMBOL(ap_recv);
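/*
 * Illustrative sketch, not part of the patch: one way a caller could
 * drive the synchronous ap_send()/ap_recv() pair above from kernel
 * context. The helper name, the retry bound and the psmid value are
 * invented; per ap_bus.h, these calls must not be mixed with
 * ap_queue_message() on the same queue.
 */
static int example_ap_roundtrip(ap_qid_t qid, void *msg, size_t msg_len,
				void *reply, size_t reply_len)
{
	unsigned long long psmid = 0x0102030405060708ULL;
	unsigned long long reply_psmid;
	int rc, i;

	rc = ap_send(qid, psmid, msg, msg_len);	/* 0, -EBUSY or -ENODEV */
	if (rc)
		return rc;
	for (i = 0; i < 6; i++) {
		mdelay(300);	/* same pacing as ap_probe_device_type() below */
		rc = ap_recv(qid, &reply_psmid, reply, reply_len);
		if (rc != -ENOENT && rc != -EBUSY)
			break;	/* got a reply or a hard error */
	}
	if (rc == 0 && reply_psmid != psmid)
		rc = -EIO;	/* reply for someone else; real code would retry */
	return rc;
}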
257
258/**
259 * Check if an AP queue is available. The test is repeated for
260 * AP_MAX_RESET times.
261 * @qid: the ap queue number
262 * @queue_depth: pointer to queue depth value
263 * @device_type: pointer to device type value
264 */
265static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
266{
267 struct ap_queue_status status;
268 int t_depth, t_device_type, rc, i;
269
270 rc = -EBUSY;
271 for (i = 0; i < AP_MAX_RESET; i++) {
272 status = ap_test_queue(qid, &t_depth, &t_device_type);
273 switch (status.response_code) {
274 case AP_RESPONSE_NORMAL:
275 *queue_depth = t_depth + 1;
276 *device_type = t_device_type;
277 rc = 0;
278 break;
279 case AP_RESPONSE_Q_NOT_AVAIL:
280 rc = -ENODEV;
281 break;
282 case AP_RESPONSE_RESET_IN_PROGRESS:
283 break;
284 case AP_RESPONSE_DECONFIGURED:
285 rc = -ENODEV;
286 break;
287 case AP_RESPONSE_CHECKSTOPPED:
288 rc = -ENODEV;
289 break;
290 case AP_RESPONSE_BUSY:
291 break;
292 default:
293 BUG();
294 }
295 if (rc != -EBUSY)
296 break;
297 if (i < AP_MAX_RESET - 1)
298 udelay(5);
299 }
300 return rc;
301}
302
303/**
304 * Reset an AP queue and wait for it to become available again.
305 * @qid: the ap queue number
306 */
307static int ap_init_queue(ap_qid_t qid)
308{
309 struct ap_queue_status status;
310 int rc, dummy, i;
311
312 rc = -ENODEV;
313 status = ap_reset_queue(qid);
314 for (i = 0; i < AP_MAX_RESET; i++) {
315 switch (status.response_code) {
316 case AP_RESPONSE_NORMAL:
317 if (status.queue_empty)
318 rc = 0;
319 break;
320 case AP_RESPONSE_Q_NOT_AVAIL:
321 case AP_RESPONSE_DECONFIGURED:
322 case AP_RESPONSE_CHECKSTOPPED:
323 i = AP_MAX_RESET; /* return with -ENODEV */
324 break;
325 case AP_RESPONSE_RESET_IN_PROGRESS:
326 case AP_RESPONSE_BUSY:
327 default:
328 break;
329 }
330 if (rc != -ENODEV)
331 break;
332 if (i < AP_MAX_RESET - 1) {
333 udelay(5);
334 status = ap_test_queue(qid, &dummy, &dummy);
335 }
336 }
337 return rc;
338}
339
340/**
341 * AP device related attributes.
342 */
343static ssize_t ap_hwtype_show(struct device *dev,
344 struct device_attribute *attr, char *buf)
345{
346 struct ap_device *ap_dev = to_ap_dev(dev);
347 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
348}
349static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
350
351static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
352 char *buf)
353{
354 struct ap_device *ap_dev = to_ap_dev(dev);
355 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
356}
357static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
358
359static ssize_t ap_request_count_show(struct device *dev,
360 struct device_attribute *attr,
361 char *buf)
362{
363 struct ap_device *ap_dev = to_ap_dev(dev);
364 int rc;
365
366 spin_lock_bh(&ap_dev->lock);
367 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
368 spin_unlock_bh(&ap_dev->lock);
369 return rc;
370}
371
372static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
373
374static ssize_t ap_modalias_show(struct device *dev,
375 struct device_attribute *attr, char *buf)
376{
377 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
378}
379
380static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
381
382static struct attribute *ap_dev_attrs[] = {
383 &dev_attr_hwtype.attr,
384 &dev_attr_depth.attr,
385 &dev_attr_request_count.attr,
386 &dev_attr_modalias.attr,
387 NULL
388};
389static struct attribute_group ap_dev_attr_group = {
390 .attrs = ap_dev_attrs
391};
392
393/**
394 * AP bus driver registration/unregistration.
395 */
396static int ap_bus_match(struct device *dev, struct device_driver *drv)
397{
398 struct ap_device *ap_dev = to_ap_dev(dev);
399 struct ap_driver *ap_drv = to_ap_drv(drv);
400 struct ap_device_id *id;
401
402 /**
403 * Compare device type of the device with the list of
404 * supported types of the device_driver.
405 */
406 for (id = ap_drv->ids; id->match_flags; id++) {
407 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
408 (id->dev_type != ap_dev->device_type))
409 continue;
410 return 1;
411 }
412 return 0;
413}
414
415/**
416 * uevent function for AP devices. It sets up a single environment
417 * variable DEV_TYPE which contains the hardware device type.
418 */
419static int ap_uevent (struct device *dev, char **envp, int num_envp,
420 char *buffer, int buffer_size)
421{
422 struct ap_device *ap_dev = to_ap_dev(dev);
423 int length;
424
425 if (!ap_dev)
426 return -ENODEV;
427
428 /* Set up DEV_TYPE environment variable. */
429 envp[0] = buffer;
430 length = scnprintf(buffer, buffer_size, "DEV_TYPE=%04X",
431 ap_dev->device_type);
432 if (buffer_size - length <= 0)
433 return -ENOMEM;
434 envp[1] = 0;
435 return 0;
436}
437
438static struct bus_type ap_bus_type = {
439 .name = "ap",
440 .match = &ap_bus_match,
441 .uevent = &ap_uevent,
442};
443
444static int ap_device_probe(struct device *dev)
445{
446 struct ap_device *ap_dev = to_ap_dev(dev);
447 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
448 int rc;
449
450 ap_dev->drv = ap_drv;
451 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
452 if (rc)
453 ap_dev->unregistered = 1;
454 return rc;
455}
456
457/**
458 * Flush all requests from the request/pending queue of an AP device.
459 * @ap_dev: pointer to the AP device.
460 */
461static inline void __ap_flush_queue(struct ap_device *ap_dev)
462{
463 struct ap_message *ap_msg, *next;
464
465 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
466 list_del_init(&ap_msg->list);
467 ap_dev->pendingq_count--;
468 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
469 }
470 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
471 list_del_init(&ap_msg->list);
472 ap_dev->requestq_count--;
473 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
474 }
475}
476
477void ap_flush_queue(struct ap_device *ap_dev)
478{
479 spin_lock_bh(&ap_dev->lock);
480 __ap_flush_queue(ap_dev);
481 spin_unlock_bh(&ap_dev->lock);
482}
483EXPORT_SYMBOL(ap_flush_queue);
484
485static int ap_device_remove(struct device *dev)
486{
487 struct ap_device *ap_dev = to_ap_dev(dev);
488 struct ap_driver *ap_drv = ap_dev->drv;
489
490 spin_lock_bh(&ap_dev->lock);
491 __ap_flush_queue(ap_dev);
492 /**
493 * set ->unregistered to 1 while holding the lock. This prevents
 494 * new messages from being put on the queue from now on.
495 */
496 ap_dev->unregistered = 1;
497 spin_unlock_bh(&ap_dev->lock);
498 if (ap_drv->remove)
499 ap_drv->remove(ap_dev);
500 return 0;
501}
502
503int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
504 char *name)
505{
506 struct device_driver *drv = &ap_drv->driver;
507
508 drv->bus = &ap_bus_type;
509 drv->probe = ap_device_probe;
510 drv->remove = ap_device_remove;
511 drv->owner = owner;
512 drv->name = name;
513 return driver_register(drv);
514}
515EXPORT_SYMBOL(ap_driver_register);
516
517void ap_driver_unregister(struct ap_driver *ap_drv)
518{
519 driver_unregister(&ap_drv->driver);
520}
521EXPORT_SYMBOL(ap_driver_unregister);
522
523/**
524 * AP bus attributes.
525 */
526static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
527{
528 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
529}
530
531static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
532
533static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
534{
535 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
536}
537
538static ssize_t ap_config_time_store(struct bus_type *bus,
539 const char *buf, size_t count)
540{
541 int time;
542
543 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
544 return -EINVAL;
545 ap_config_time = time;
546 if (!timer_pending(&ap_config_timer) ||
547 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
548 ap_config_timer.expires = jiffies + ap_config_time * HZ;
549 add_timer(&ap_config_timer);
550 }
551 return count;
552}
553
554static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
555
556static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
557{
558 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
559}
560
561static ssize_t ap_poll_thread_store(struct bus_type *bus,
562 const char *buf, size_t count)
563{
564 int flag, rc;
565
566 if (sscanf(buf, "%d\n", &flag) != 1)
567 return -EINVAL;
568 if (flag) {
569 rc = ap_poll_thread_start();
570 if (rc)
571 return rc;
572 }
573 else
574 ap_poll_thread_stop();
575 return count;
576}
577
578static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
579
580static struct bus_attribute *const ap_bus_attrs[] = {
581 &bus_attr_ap_domain,
582 &bus_attr_config_time,
583 &bus_attr_poll_thread,
584 NULL
585};
586
587/**
588 * Pick one of the 16 ap domains.
589 */
590static inline int ap_select_domain(void)
591{
592 int queue_depth, device_type, count, max_count, best_domain;
593 int rc, i, j;
594
595 /**
596 * We want to use a single domain. Either the one specified with
597 * the "domain=" parameter or the domain with the maximum number
598 * of devices.
599 */
600 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
601 /* Domain has already been selected. */
602 return 0;
603 best_domain = -1;
604 max_count = 0;
605 for (i = 0; i < AP_DOMAINS; i++) {
606 count = 0;
607 for (j = 0; j < AP_DEVICES; j++) {
608 ap_qid_t qid = AP_MKQID(j, i);
609 rc = ap_query_queue(qid, &queue_depth, &device_type);
610 if (rc)
611 continue;
612 count++;
613 }
614 if (count > max_count) {
615 max_count = count;
616 best_domain = i;
617 }
618 }
619 if (best_domain >= 0){
620 ap_domain_index = best_domain;
621 return 0;
622 }
623 return -ENODEV;
624}
625
626/**
627 * Find the device type if query queue returned a device type of 0.
628 * @ap_dev: pointer to the AP device.
629 */
630static int ap_probe_device_type(struct ap_device *ap_dev)
631{
632 static unsigned char msg[] = {
633 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
634 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
635 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
636 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
637 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
638 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
639 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
640 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
641 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
642 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
643 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
644 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
645 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
646 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
647 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
648 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
649 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
650 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
651 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
652 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
653 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
654 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
655 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
656 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
657 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
658 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
659 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
660 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
661 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
662 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
663 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
664 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
665 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
666 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
667 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
668 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
669 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
670 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
671 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
672 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
673 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
674 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
675 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
676 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
677 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
678 };
679 struct ap_queue_status status;
680 unsigned long long psmid;
681 char *reply;
682 int rc, i;
683
684 reply = (void *) get_zeroed_page(GFP_KERNEL);
685 if (!reply) {
686 rc = -ENOMEM;
687 goto out;
688 }
689
690 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
691 msg, sizeof(msg));
692 if (status.response_code != AP_RESPONSE_NORMAL) {
693 rc = -ENODEV;
694 goto out_free;
695 }
696
697 /* Wait for the test message to complete. */
698 for (i = 0; i < 6; i++) {
699 mdelay(300);
700 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
701 if (status.response_code == AP_RESPONSE_NORMAL &&
702 psmid == 0x0102030405060708ULL)
703 break;
704 }
705 if (i < 6) {
706 /* Got an answer. */
707 if (reply[0] == 0x00 && reply[1] == 0x86)
708 ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
709 else
710 ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
711 rc = 0;
712 } else
713 rc = -ENODEV;
714
715out_free:
716 free_page((unsigned long) reply);
717out:
718 return rc;
719}
720
721/**
722 * Scan the ap bus for new devices.
723 */
724static int __ap_scan_bus(struct device *dev, void *data)
725{
726 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
727}
728
729static void ap_device_release(struct device *dev)
730{
731 struct ap_device *ap_dev = to_ap_dev(dev);
732
733 kfree(ap_dev);
734}
735
736static void ap_scan_bus(void *data)
737{
738 struct ap_device *ap_dev;
739 struct device *dev;
740 ap_qid_t qid;
741 int queue_depth, device_type;
742 int rc, i;
743
744 if (ap_select_domain() != 0)
745 return;
746 for (i = 0; i < AP_DEVICES; i++) {
747 qid = AP_MKQID(i, ap_domain_index);
748 dev = bus_find_device(&ap_bus_type, NULL,
749 (void *)(unsigned long)qid,
750 __ap_scan_bus);
751 if (dev) {
752 put_device(dev);
753 continue;
754 }
755 rc = ap_query_queue(qid, &queue_depth, &device_type);
756 if (rc)
757 continue;
758 rc = ap_init_queue(qid);
759 if (rc)
760 continue;
761 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
762 if (!ap_dev)
763 break;
764 ap_dev->qid = qid;
765 ap_dev->queue_depth = queue_depth;
766 spin_lock_init(&ap_dev->lock);
767 INIT_LIST_HEAD(&ap_dev->pendingq);
768 INIT_LIST_HEAD(&ap_dev->requestq);
769 if (device_type == 0)
770 ap_probe_device_type(ap_dev);
771 else
772 ap_dev->device_type = device_type;
773
774 ap_dev->device.bus = &ap_bus_type;
775 ap_dev->device.parent = ap_root_device;
776 snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
777 AP_QID_DEVICE(ap_dev->qid));
778 ap_dev->device.release = ap_device_release;
779 rc = device_register(&ap_dev->device);
780 if (rc) {
781 kfree(ap_dev);
782 continue;
783 }
784 /* Add device attributes. */
785 rc = sysfs_create_group(&ap_dev->device.kobj,
786 &ap_dev_attr_group);
787 if (rc)
788 device_unregister(&ap_dev->device);
789 }
790}
791
792static void
793ap_config_timeout(unsigned long ptr)
794{
795 queue_work(ap_work_queue, &ap_config_work);
796 ap_config_timer.expires = jiffies + ap_config_time * HZ;
797 add_timer(&ap_config_timer);
798}
799
800/**
801 * Set up the timer to run the poll tasklet
802 */
803static inline void ap_schedule_poll_timer(void)
804{
805 if (timer_pending(&ap_poll_timer))
806 return;
807 mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
808}
809
810/**
811 * Receive pending reply messages from an AP device.
812 * @ap_dev: pointer to the AP device
813 * @flags: pointer to control flags, bit 2^0 is set if another poll is
814 * required, bit 2^1 is set if the poll timer needs to get armed
815 * Returns 0 if the device is still present, -ENODEV if not.
816 */
817static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
818{
819 struct ap_queue_status status;
820 struct ap_message *ap_msg;
821
822 if (ap_dev->queue_count <= 0)
823 return 0;
824 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
825 ap_dev->reply->message, ap_dev->reply->length);
826 switch (status.response_code) {
827 case AP_RESPONSE_NORMAL:
828 atomic_dec(&ap_poll_requests);
829 ap_dev->queue_count--;
830 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
831 if (ap_msg->psmid != ap_dev->reply->psmid)
832 continue;
833 list_del_init(&ap_msg->list);
834 ap_dev->pendingq_count--;
835 ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
836 break;
837 }
838 if (ap_dev->queue_count > 0)
839 *flags |= 1;
840 break;
841 case AP_RESPONSE_NO_PENDING_REPLY:
842 if (status.queue_empty) {
843 /* The card shouldn't forget requests but who knows. */
844 ap_dev->queue_count = 0;
845 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
846 ap_dev->requestq_count += ap_dev->pendingq_count;
847 ap_dev->pendingq_count = 0;
848 } else
849 *flags |= 2;
850 break;
851 default:
852 return -ENODEV;
853 }
854 return 0;
855}
856
857/**
858 * Send messages from the request queue to an AP device.
859 * @ap_dev: pointer to the AP device
860 * @flags: pointer to control flags, bit 2^0 is set if another poll is
861 * required, bit 2^1 is set if the poll timer needs to get armed
862 * Returns 0 if the device is still present, -ENODEV if not.
863 */
864static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
865{
866 struct ap_queue_status status;
867 struct ap_message *ap_msg;
868
869 if (ap_dev->requestq_count <= 0 ||
870 ap_dev->queue_count >= ap_dev->queue_depth)
871 return 0;
872 /* Start the next request on the queue. */
873 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
874 status = __ap_send(ap_dev->qid, ap_msg->psmid,
875 ap_msg->message, ap_msg->length);
876 switch (status.response_code) {
877 case AP_RESPONSE_NORMAL:
878 atomic_inc(&ap_poll_requests);
879 ap_dev->queue_count++;
880 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
881 ap_dev->requestq_count--;
882 ap_dev->pendingq_count++;
883 if (ap_dev->queue_count < ap_dev->queue_depth &&
884 ap_dev->requestq_count > 0)
885 *flags |= 1;
886 *flags |= 2;
887 break;
888 case AP_RESPONSE_Q_FULL:
889 *flags |= 2;
890 break;
891 case AP_RESPONSE_MESSAGE_TOO_BIG:
892 return -EINVAL;
893 default:
894 return -ENODEV;
895 }
896 return 0;
897}
898
899/**
900 * Poll AP device for pending replies and send new messages. If either
901 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
902 * @ap_dev: pointer to the bus device
903 * @flags: pointer to control flags, bit 2^0 is set if another poll is
904 * required, bit 2^1 is set if the poll timer needs to get armed
905 * Returns 0.
906 */
907static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
908{
909 int rc;
910
911 rc = ap_poll_read(ap_dev, flags);
912 if (rc)
913 return rc;
914 return ap_poll_write(ap_dev, flags);
915}
916
917/**
918 * Queue a message to a device.
919 * @ap_dev: pointer to the AP device
920 * @ap_msg: the message to be queued
921 */
922static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
923{
924 struct ap_queue_status status;
925
926 if (list_empty(&ap_dev->requestq) &&
927 ap_dev->queue_count < ap_dev->queue_depth) {
928 status = __ap_send(ap_dev->qid, ap_msg->psmid,
929 ap_msg->message, ap_msg->length);
930 switch (status.response_code) {
931 case AP_RESPONSE_NORMAL:
932 list_add_tail(&ap_msg->list, &ap_dev->pendingq);
933 atomic_inc(&ap_poll_requests);
934 ap_dev->pendingq_count++;
935 ap_dev->queue_count++;
936 ap_dev->total_request_count++;
937 break;
938 case AP_RESPONSE_Q_FULL:
939 list_add_tail(&ap_msg->list, &ap_dev->requestq);
940 ap_dev->requestq_count++;
941 ap_dev->total_request_count++;
942 return -EBUSY;
943 case AP_RESPONSE_MESSAGE_TOO_BIG:
944 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
945 return -EINVAL;
946 default: /* Device is gone. */
947 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
948 return -ENODEV;
949 }
950 } else {
951 list_add_tail(&ap_msg->list, &ap_dev->requestq);
952 ap_dev->requestq_count++;
953 ap_dev->total_request_count++;
954 return -EBUSY;
955 }
956 ap_schedule_poll_timer();
957 return 0;
958}
959
960void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
961{
962 unsigned long flags;
963 int rc;
964
965 spin_lock_bh(&ap_dev->lock);
966 if (!ap_dev->unregistered) {
967 /* Make room on the queue by polling for finished requests. */
968 rc = ap_poll_queue(ap_dev, &flags);
969 if (!rc)
970 rc = __ap_queue_message(ap_dev, ap_msg);
971 if (!rc)
972 wake_up(&ap_poll_wait);
973 } else {
974 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
975 rc = 0;
976 }
977 spin_unlock_bh(&ap_dev->lock);
978 if (rc == -ENODEV)
979 device_unregister(&ap_dev->device);
980}
981EXPORT_SYMBOL(ap_queue_message);
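/*
 * Illustrative sketch, not part of the patch: the asynchronous pattern
 * a card driver is expected to use. The ap_message is assumed to live
 * in a driver-owned request structure; the function name and psmid
 * scheme are invented. The reply (or an ERR_PTR error code) arrives
 * later through the driver's ->receive() callback once ap_poll_read()
 * matches the psmid on the pendingq.
 */
static void example_submit(struct ap_device *ap_dev, struct ap_message *ap_msg,
			   void *buf, size_t len, unsigned long long psmid)
{
	ap_msg->psmid = psmid;	/* must be unique per outstanding request */
	ap_msg->message = buf;	/* request text, already formatted for the card */
	ap_msg->length = len;
	ap_msg->private = NULL;	/* free for driver bookkeeping */
	ap_queue_message(ap_dev, ap_msg);
	/* ap_cancel_message(ap_dev, ap_msg) would withdraw the request
	 * again as long as no reply has been matched. */
}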
982
983/**
984 * Cancel a crypto request. This is done by removing the request
 985 * from the device pendingq or requestq queue. Note that the
986 * request stays on the AP queue. When it finishes the message
987 * reply will be discarded because the psmid can't be found.
988 * @ap_dev: AP device that has the message queued
989 * @ap_msg: the message that is to be removed
990 */
991void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
992{
993 struct ap_message *tmp;
994
995 spin_lock_bh(&ap_dev->lock);
996 if (!list_empty(&ap_msg->list)) {
997 list_for_each_entry(tmp, &ap_dev->pendingq, list)
998 if (tmp->psmid == ap_msg->psmid) {
999 ap_dev->pendingq_count--;
1000 goto found;
1001 }
1002 ap_dev->requestq_count--;
1003 found:
1004 list_del_init(&ap_msg->list);
1005 }
1006 spin_unlock_bh(&ap_dev->lock);
1007}
1008EXPORT_SYMBOL(ap_cancel_message);
1009
1010/**
1011 * AP receive polling for finished AP requests
1012 */
1013static void ap_poll_timeout(unsigned long unused)
1014{
1015 tasklet_schedule(&ap_tasklet);
1016}
1017
1018/**
1019 * Poll all AP devices on the bus in a round robin fashion. Continue
1020 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1021 * of the control flags has been set arm the poll timer.
1022 */
1023static int __ap_poll_all(struct device *dev, void *data)
1024{
1025 struct ap_device *ap_dev = to_ap_dev(dev);
1026 int rc;
1027
1028 spin_lock(&ap_dev->lock);
1029 if (!ap_dev->unregistered) {
1030 rc = ap_poll_queue(to_ap_dev(dev), (unsigned long *) data);
1031 } else
1032 rc = 0;
1033 spin_unlock(&ap_dev->lock);
1034 if (rc)
1035 device_unregister(&ap_dev->device);
1036 return 0;
1037}
1038
1039static void ap_poll_all(unsigned long dummy)
1040{
1041 unsigned long flags;
1042
1043 do {
1044 flags = 0;
1045 bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
1046 } while (flags & 1);
1047 if (flags & 2)
1048 ap_schedule_poll_timer();
1049}
1050
1051/**
1052 * AP bus poll thread. The purpose of this thread is to poll for
1053 * finished requests in a loop if there is a "free" cpu - that is
1054 * a cpu that doesn't have anything better to do. The polling stops
1055 * as soon as there is another task or if all messages have been
1056 * delivered.
1057 */
1058static int ap_poll_thread(void *data)
1059{
1060 DECLARE_WAITQUEUE(wait, current);
1061 unsigned long flags;
1062 int requests;
1063
1064 set_user_nice(current, -20);
1065 while (1) {
1066 if (need_resched()) {
1067 schedule();
1068 continue;
1069 }
1070 add_wait_queue(&ap_poll_wait, &wait);
1071 set_current_state(TASK_INTERRUPTIBLE);
1072 if (kthread_should_stop())
1073 break;
1074 requests = atomic_read(&ap_poll_requests);
1075 if (requests <= 0)
1076 schedule();
1077 set_current_state(TASK_RUNNING);
1078 remove_wait_queue(&ap_poll_wait, &wait);
1079
1080 local_bh_disable();
1081 flags = 0;
1082 bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
1083 local_bh_enable();
1084 }
1085 set_current_state(TASK_RUNNING);
1086 remove_wait_queue(&ap_poll_wait, &wait);
1087 return 0;
1088}
1089
1090static int ap_poll_thread_start(void)
1091{
1092 int rc;
1093
1094 mutex_lock(&ap_poll_thread_mutex);
1095 if (!ap_poll_kthread) {
1096 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1097 rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1098 if (rc)
1099 ap_poll_kthread = NULL;
1100 }
1101 else
1102 rc = 0;
1103 mutex_unlock(&ap_poll_thread_mutex);
1104 return rc;
1105}
1106
1107static void ap_poll_thread_stop(void)
1108{
1109 mutex_lock(&ap_poll_thread_mutex);
1110 if (ap_poll_kthread) {
1111 kthread_stop(ap_poll_kthread);
1112 ap_poll_kthread = NULL;
1113 }
1114 mutex_unlock(&ap_poll_thread_mutex);
1115}
1116
1117/**
1118 * The module initialization code.
1119 */
1120int __init ap_module_init(void)
1121{
1122 int rc, i;
1123
1124 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1125 printk(KERN_WARNING "Invalid param: domain = %d. "
1126 " Not loading.\n", ap_domain_index);
1127 return -EINVAL;
1128 }
1129 if (ap_instructions_available() != 0) {
1130 printk(KERN_WARNING "AP instructions not installed.\n");
1131 return -ENODEV;
1132 }
1133
1134 /* Create /sys/bus/ap. */
1135 rc = bus_register(&ap_bus_type);
1136 if (rc)
1137 goto out;
1138 for (i = 0; ap_bus_attrs[i]; i++) {
1139 rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1140 if (rc)
1141 goto out_bus;
1142 }
1143
1144 /* Create /sys/devices/ap. */
1145 ap_root_device = s390_root_dev_register("ap");
1146 rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
1147 if (rc)
1148 goto out_bus;
1149
1150 ap_work_queue = create_singlethread_workqueue("kapwork");
1151 if (!ap_work_queue) {
1152 rc = -ENOMEM;
1153 goto out_root;
1154 }
1155
1156 if (ap_select_domain() == 0)
1157 ap_scan_bus(NULL);
1158
1159 /* Setup the ap bus rescan timer. */
1160 init_timer(&ap_config_timer);
1161 ap_config_timer.function = ap_config_timeout;
1162 ap_config_timer.data = 0;
1163 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1164 add_timer(&ap_config_timer);
1165
1166 /* Start the low priority AP bus poll thread. */
1167 if (ap_thread_flag) {
1168 rc = ap_poll_thread_start();
1169 if (rc)
1170 goto out_work;
1171 }
1172
1173 return 0;
1174
1175out_work:
1176 del_timer_sync(&ap_config_timer);
1177 del_timer_sync(&ap_poll_timer);
1178 destroy_workqueue(ap_work_queue);
1179out_root:
1180 s390_root_dev_unregister(ap_root_device);
1181out_bus:
1182 while (i--)
1183 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1184 bus_unregister(&ap_bus_type);
1185out:
1186 return rc;
1187}
1188
1189static int __ap_match_all(struct device *dev, void *data)
1190{
1191 return 1;
1192}
1193
1194/**
1195 * The module termination code
1196 */
1197void ap_module_exit(void)
1198{
1199 int i;
1200 struct device *dev;
1201
1202 ap_poll_thread_stop();
1203 del_timer_sync(&ap_config_timer);
1204 del_timer_sync(&ap_poll_timer);
1205 destroy_workqueue(ap_work_queue);
1206 s390_root_dev_unregister(ap_root_device);
1207 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
1208 __ap_match_all)))
1209 {
1210 device_unregister(dev);
1211 put_device(dev);
1212 }
1213 for (i = 0; ap_bus_attrs[i]; i++)
1214 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1215 bus_unregister(&ap_bus_type);
1216}
1217
1218#ifndef CONFIG_ZCRYPT_MONOLITHIC
1219module_init(ap_module_init);
1220module_exit(ap_module_exit);
1221#endif
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
new file mode 100644
index 000000000000..83b69c01cd6e
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.h
@@ -0,0 +1,158 @@
1/*
2 * linux/drivers/s390/crypto/ap_bus.h
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus header file.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#ifndef _AP_BUS_H_
27#define _AP_BUS_H_
28
29#include <linux/device.h>
30#include <linux/mod_devicetable.h>
31#include <linux/types.h>
32
33#define AP_DEVICES 64 /* Number of AP devices. */
34#define AP_DOMAINS 16 /* Number of AP domains. */
35#define AP_MAX_RESET 90 /* Maximum number of resets. */
36#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
37#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
38
39extern int ap_domain_index;
40
41/**
42 * The ap_qid_t identifier of an ap queue. It contains a
43 * 6 bit device index and a 4 bit queue index (domain).
44 */
45typedef unsigned int ap_qid_t;
46
47#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15))
48#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
49#define AP_QID_QUEUE(_qid) ((_qid) & 15)
50
51/**
52 * The ap queue status word is returned by all three AP functions
53 * (PQAP, NQAP and DQAP). There's a set of flags in the first
54 * byte, followed by a 1 byte response code.
55 */
56struct ap_queue_status {
57 unsigned int queue_empty : 1;
58 unsigned int replies_waiting : 1;
59 unsigned int queue_full : 1;
60 unsigned int pad1 : 5;
61 unsigned int response_code : 8;
62 unsigned int pad2 : 16;
63};
64
65#define AP_RESPONSE_NORMAL 0x00
66#define AP_RESPONSE_Q_NOT_AVAIL 0x01
67#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
68#define AP_RESPONSE_DECONFIGURED 0x03
69#define AP_RESPONSE_CHECKSTOPPED 0x04
70#define AP_RESPONSE_BUSY 0x05
71#define AP_RESPONSE_Q_FULL 0x10
72#define AP_RESPONSE_NO_PENDING_REPLY 0x10
73#define AP_RESPONSE_INDEX_TOO_BIG 0x11
74#define AP_RESPONSE_NO_FIRST_PART 0x13
75#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
76
77/**
78 * Known device types
79 */
80#define AP_DEVICE_TYPE_PCICC 3
81#define AP_DEVICE_TYPE_PCICA 4
82#define AP_DEVICE_TYPE_PCIXCC 5
83#define AP_DEVICE_TYPE_CEX2A 6
84#define AP_DEVICE_TYPE_CEX2C 7
85
86struct ap_device;
87struct ap_message;
88
89struct ap_driver {
90 struct device_driver driver;
91 struct ap_device_id *ids;
92
93 int (*probe)(struct ap_device *);
94 void (*remove)(struct ap_device *);
95 /* receive is called from tasklet context */
96 void (*receive)(struct ap_device *, struct ap_message *,
97 struct ap_message *);
98};
99
100#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
101
102int ap_driver_register(struct ap_driver *, struct module *, char *);
103void ap_driver_unregister(struct ap_driver *);
104
105struct ap_device {
106 struct device device;
107 struct ap_driver *drv; /* Pointer to AP device driver. */
108 spinlock_t lock; /* Per device lock. */
109
110 ap_qid_t qid; /* AP queue id. */
111 int queue_depth; /* AP queue depth.*/
112 int device_type; /* AP device type. */
113 int unregistered; /* marks AP device as unregistered */
114
115 int queue_count; /* # messages currently on AP queue. */
116
 117	struct list_head pendingq;	/* List of messages sent to AP queue. */
118 int pendingq_count; /* # requests on pendingq list. */
 119	struct list_head requestq;	/* List of messages yet to be sent. */
120 int requestq_count; /* # requests on requestq list. */
121 int total_request_count; /* # requests ever for this AP device. */
122
123 struct ap_message *reply; /* Per device reply message. */
124
125 void *private; /* ap driver private pointer. */
126};
127
128#define to_ap_dev(x) container_of((x), struct ap_device, device)
129
130struct ap_message {
131 struct list_head list; /* Request queueing. */
132 unsigned long long psmid; /* Message id. */
133 void *message; /* Pointer to message buffer. */
134 size_t length; /* Message length. */
135
136 void *private; /* ap driver private pointer. */
137};
138
139#define AP_DEVICE(dt) \
140 .dev_type=(dt), \
141 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
142
143/**
144 * Note: don't use ap_send/ap_recv after using ap_queue_message
145 * for the first time. Otherwise the ap message queue will get
146 * confused.
147 */
148int ap_send(ap_qid_t, unsigned long long, void *, size_t);
149int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
150
151void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
152void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
153void ap_flush_queue(struct ap_device *ap_dev);
154
155int ap_module_init(void);
156void ap_module_exit(void);
157
158#endif /* _AP_BUS_H_ */
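The qid layout declared in ap_bus.h above packs a 6-bit card (device) index into bits 8-13 and a 4-bit domain (queue) index into bits 0-3. A stand-alone round-trip check of the three macros, copied verbatim, with an invented card/domain pair:

#include <stdio.h>

typedef unsigned int ap_qid_t;

#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15))
#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
#define AP_QID_QUEUE(_qid) ((_qid) & 15)

int main(void)
{
	ap_qid_t qid = AP_MKQID(0x11, 6);	/* card 0x11, domain 6 */

	/* prints: qid=0x1106 device=0x11 domain=6 */
	printf("qid=0x%04x device=0x%02x domain=%u\n",
	       qid, AP_QID_DEVICE(qid), AP_QID_QUEUE(qid));
	return 0;
}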
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
deleted file mode 100644
index dbbcda3c846a..000000000000
--- a/drivers/s390/crypto/z90common.h
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90common.h
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _Z90COMMON_H_
28#define _Z90COMMON_H_
29
30
31#define RESPBUFFSIZE 256
32#define PCI_FUNC_KEY_DECRYPT 0x5044
33#define PCI_FUNC_KEY_ENCRYPT 0x504B
34extern int ext_bitlens;
35
36enum devstat {
37 DEV_GONE,
38 DEV_ONLINE,
39 DEV_QUEUE_FULL,
40 DEV_EMPTY,
41 DEV_NO_WORK,
42 DEV_BAD_MESSAGE,
43 DEV_TSQ_EXCEPTION,
44 DEV_RSQ_EXCEPTION,
45 DEV_SEN_EXCEPTION,
46 DEV_REC_EXCEPTION
47};
48
49enum hdstat {
50 HD_NOT_THERE,
51 HD_BUSY,
52 HD_DECONFIGURED,
53 HD_CHECKSTOPPED,
54 HD_ONLINE,
55 HD_TSQ_EXCEPTION
56};
57
58#define Z90C_NO_DEVICES 1
59#define Z90C_AMBIGUOUS_DOMAIN 2
60#define Z90C_INCORRECT_DOMAIN 3
61#define ENOTINIT 4
62
63#define SEN_BUSY 7
64#define SEN_USER_ERROR 8
65#define SEN_QUEUE_FULL 11
66#define SEN_NOT_AVAIL 16
67#define SEN_PAD_ERROR 17
68#define SEN_RETRY 18
69#define SEN_RELEASED 24
70
71#define REC_EMPTY 4
72#define REC_BUSY 6
73#define REC_OPERAND_INV 8
74#define REC_OPERAND_SIZE 9
75#define REC_EVEN_MOD 10
76#define REC_NO_WORK 11
77#define REC_HARDWAR_ERR 12
78#define REC_NO_RESPONSE 13
79#define REC_RETRY_DEV 14
80#define REC_USER_GONE 15
81#define REC_BAD_MESSAGE 16
82#define REC_INVALID_PAD 17
83#define REC_USE_PCICA 18
84
85#define WRONG_DEVICE_TYPE 20
86
87#define REC_FATAL_ERROR 32
88#define SEN_FATAL_ERROR 33
89#define TSQ_FATAL_ERROR 34
90#define RSQ_FATAL_ERROR 35
91
92#define Z90CRYPT_NUM_TYPES 6
93#define PCICA 0
94#define PCICC 1
95#define PCIXCC_MCL2 2
96#define PCIXCC_MCL3 3
97#define CEX2C 4
98#define CEX2A 5
99#define NILDEV -1
100#define ANYDEV -1
101#define PCIXCC_UNK -2
102
103enum hdevice_type {
104 PCICC_HW = 3,
105 PCICA_HW = 4,
106 PCIXCC_HW = 5,
107 CEX2A_HW = 6,
108 CEX2C_HW = 7
109};
110
111struct CPRBX {
112 unsigned short cprb_len;
113 unsigned char cprb_ver_id;
114 unsigned char pad_000[3];
115 unsigned char func_id[2];
116 unsigned char cprb_flags[4];
117 unsigned int req_parml;
118 unsigned int req_datal;
119 unsigned int rpl_msgbl;
120 unsigned int rpld_parml;
121 unsigned int rpl_datal;
122 unsigned int rpld_datal;
123 unsigned int req_extbl;
124 unsigned char pad_001[4];
125 unsigned int rpld_extbl;
126 unsigned char req_parmb[16];
127 unsigned char req_datab[16];
128 unsigned char rpl_parmb[16];
129 unsigned char rpl_datab[16];
130 unsigned char req_extb[16];
131 unsigned char rpl_extb[16];
132 unsigned short ccp_rtcode;
133 unsigned short ccp_rscode;
134 unsigned int mac_data_len;
135 unsigned char logon_id[8];
136 unsigned char mac_value[8];
137 unsigned char mac_content_flgs;
138 unsigned char pad_002;
139 unsigned short domain;
140 unsigned char pad_003[12];
141 unsigned char pad_004[36];
142};
143
144#ifndef DEV_NAME
145#define DEV_NAME "z90crypt"
146#endif
147#define PRINTK(fmt, args...) \
148 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
149#define PRINTKN(fmt, args...) \
150 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
151#define PRINTKW(fmt, args...) \
152 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
153#define PRINTKC(fmt, args...) \
154 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
155
156#ifdef Z90CRYPT_DEBUG
157#define PDEBUG(fmt, args...) \
158 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
159#else
160#define PDEBUG(fmt, args...) do {} while (0)
161#endif
162
163#define UMIN(a,b) ((a) < (b) ? (a) : (b))
164#define IS_EVEN(x) ((x) == (2 * ((x) / 2)))
165
166#endif
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
deleted file mode 100644
index 0ca1d126ccb6..000000000000
--- a/drivers/s390/crypto/z90crypt.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90crypt.h
3 *
4 * z90crypt 1.3.3 (kernel-private header)
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _Z90CRYPT_H_
28#define _Z90CRYPT_H_
29
30#include <asm/z90crypt.h>
31
32/**
33 * local errno definitions
34 */
35#define ENOBUFF 129 // filp->private_data->...>work_elem_p->buffer is NULL
36#define EWORKPEND 130 // user issues ioctl while another pending
37#define ERELEASED 131 // user released while ioctl pending
38#define EQUIESCE 132 // z90crypt quiescing (no more work allowed)
39#define ETIMEOUT 133 // request timed out
 40#define EUNKNOWN 134 // some unrecognized error occurred (retry may succeed)
41#define EGETBUFF 135 // Error getting buffer or hardware lacks capability
42 // (retry in software)
43
44/**
45 * DEPRECATED STRUCTURES
46 */
47
48/**
49 * This structure is DEPRECATED and the corresponding ioctl() has been
50 * replaced with individual ioctl()s for each piece of data!
51 * This structure will NOT survive past version 1.3.1, so switch to the
52 * new ioctl()s.
53 */
54#define MASK_LENGTH 64 // mask length
55struct ica_z90_status {
56 int totalcount;
57 int leedslitecount; // PCICA
58 int leeds2count; // PCICC
59 // int PCIXCCCount; is not in struct for backward compatibility
60 int requestqWaitCount;
61 int pendingqWaitCount;
62 int totalOpenCount;
63 int cryptoDomain;
64 // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
65 // 5=CEX2C
66 unsigned char status[MASK_LENGTH];
67 // qdepth: # work elements waiting for each device
68 unsigned char qdepth[MASK_LENGTH];
69};
70
71#endif /* _Z90CRYPT_H_ */
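For reference, a minimal sketch (not taken from the driver) of how a caller could have interpreted the per-slot status array of the deprecated ica_z90_status structure above; the slot codes come from the comments in the deleted header, and the helper name is invented here.

#define MASK_LENGTH 64

/* Slot codes per the deleted header: 0 = not there, 1 = PCICA, 2 = PCICC,
 * 3 = PCIXCC_MCL2, 4 = PCIXCC_MCL3, 5 = CEX2C. */
static int count_present_devices(const unsigned char status[MASK_LENGTH])
{
	int i, present = 0;

	for (i = 0; i < MASK_LENGTH; i++)
		if (status[i] != 0)	/* 0 means "not there" */
			present++;
	return present;
}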
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
deleted file mode 100644
index be60795f4a74..000000000000
--- a/drivers/s390/crypto/z90hardware.c
+++ /dev/null
@@ -1,2531 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90hardware.c
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h>
28#include <linux/compiler.h>
29#include <linux/delay.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include "z90crypt.h"
33#include "z90common.h"
34
35struct cca_token_hdr {
36 unsigned char token_identifier;
37 unsigned char version;
38 unsigned short token_length;
39 unsigned char reserved[4];
40};
41
42#define CCA_TKN_HDR_ID_EXT 0x1E
43
44struct cca_private_ext_ME_sec {
45 unsigned char section_identifier;
46 unsigned char version;
47 unsigned short section_length;
48 unsigned char private_key_hash[20];
49 unsigned char reserved1[4];
50 unsigned char key_format;
51 unsigned char reserved2;
52 unsigned char key_name_hash[20];
53 unsigned char key_use_flags[4];
54 unsigned char reserved3[6];
55 unsigned char reserved4[24];
56 unsigned char confounder[24];
57 unsigned char exponent[128];
58 unsigned char modulus[128];
59};
60
61#define CCA_PVT_USAGE_ALL 0x80
62
63struct cca_public_sec {
64 unsigned char section_identifier;
65 unsigned char version;
66 unsigned short section_length;
67 unsigned char reserved[2];
68 unsigned short exponent_len;
69 unsigned short modulus_bit_len;
70 unsigned short modulus_byte_len;
71 unsigned char exponent[3];
72};
73
74struct cca_private_ext_ME {
75 struct cca_token_hdr pvtMEHdr;
76 struct cca_private_ext_ME_sec pvtMESec;
77 struct cca_public_sec pubMESec;
78};
79
80struct cca_public_key {
81 struct cca_token_hdr pubHdr;
82 struct cca_public_sec pubSec;
83};
84
85struct cca_pvt_ext_CRT_sec {
86 unsigned char section_identifier;
87 unsigned char version;
88 unsigned short section_length;
89 unsigned char private_key_hash[20];
90 unsigned char reserved1[4];
91 unsigned char key_format;
92 unsigned char reserved2;
93 unsigned char key_name_hash[20];
94 unsigned char key_use_flags[4];
95 unsigned short p_len;
96 unsigned short q_len;
97 unsigned short dp_len;
98 unsigned short dq_len;
99 unsigned short u_len;
100 unsigned short mod_len;
101 unsigned char reserved3[4];
102 unsigned short pad_len;
103 unsigned char reserved4[52];
104 unsigned char confounder[8];
105};
106
107#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
108#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
109
110struct cca_private_ext_CRT {
111 struct cca_token_hdr pvtCrtHdr;
112 struct cca_pvt_ext_CRT_sec pvtCrtSec;
113 struct cca_public_sec pubCrtSec;
114};
115
116struct ap_status_word {
117 unsigned char q_stat_flags;
118 unsigned char response_code;
119 unsigned char reserved[2];
120};
121
122#define AP_Q_STATUS_EMPTY 0x80
123#define AP_Q_STATUS_REPLIES_WAITING 0x40
124#define AP_Q_STATUS_ARRAY_FULL 0x20
125
126#define AP_RESPONSE_NORMAL 0x00
127#define AP_RESPONSE_Q_NOT_AVAIL 0x01
128#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
129#define AP_RESPONSE_DECONFIGURED 0x03
130#define AP_RESPONSE_CHECKSTOPPED 0x04
131#define AP_RESPONSE_BUSY 0x05
132#define AP_RESPONSE_Q_FULL 0x10
133#define AP_RESPONSE_NO_PENDING_REPLY 0x10
134#define AP_RESPONSE_INDEX_TOO_BIG 0x11
135#define AP_RESPONSE_NO_FIRST_PART 0x13
136#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
137
138#define AP_MAX_CDX_BITL 4
139#define AP_RQID_RESERVED_BITL 4
140#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL)
141
142struct type4_hdr {
143 unsigned char reserved1;
144 unsigned char msg_type_code;
145 unsigned short msg_len;
146 unsigned char request_code;
147 unsigned char msg_fmt;
148 unsigned short reserved2;
149};
150
151#define TYPE4_TYPE_CODE 0x04
152#define TYPE4_REQU_CODE 0x40
153
154#define TYPE4_SME_LEN 0x0188
155#define TYPE4_LME_LEN 0x0308
156#define TYPE4_SCR_LEN 0x01E0
157#define TYPE4_LCR_LEN 0x03A0
158
159#define TYPE4_SME_FMT 0x00
160#define TYPE4_LME_FMT 0x10
161#define TYPE4_SCR_FMT 0x40
162#define TYPE4_LCR_FMT 0x50
163
164struct type4_sme {
165 struct type4_hdr header;
166 unsigned char message[128];
167 unsigned char exponent[128];
168 unsigned char modulus[128];
169};
170
171struct type4_lme {
172 struct type4_hdr header;
173 unsigned char message[256];
174 unsigned char exponent[256];
175 unsigned char modulus[256];
176};
177
178struct type4_scr {
179 struct type4_hdr header;
180 unsigned char message[128];
181 unsigned char dp[72];
182 unsigned char dq[64];
183 unsigned char p[72];
184 unsigned char q[64];
185 unsigned char u[72];
186};
187
188struct type4_lcr {
189 struct type4_hdr header;
190 unsigned char message[256];
191 unsigned char dp[136];
192 unsigned char dq[128];
193 unsigned char p[136];
194 unsigned char q[128];
195 unsigned char u[136];
196};
197
198union type4_msg {
199 struct type4_sme sme;
200 struct type4_lme lme;
201 struct type4_scr scr;
202 struct type4_lcr lcr;
203};
204
205struct type84_hdr {
206 unsigned char reserved1;
207 unsigned char code;
208 unsigned short len;
209 unsigned char reserved2[4];
210};
211
212#define TYPE84_RSP_CODE 0x84
213
214struct type6_hdr {
215 unsigned char reserved1;
216 unsigned char type;
217 unsigned char reserved2[2];
218 unsigned char right[4];
219 unsigned char reserved3[2];
220 unsigned char reserved4[2];
221 unsigned char apfs[4];
222 unsigned int offset1;
223 unsigned int offset2;
224 unsigned int offset3;
225 unsigned int offset4;
226 unsigned char agent_id[16];
227 unsigned char rqid[2];
228 unsigned char reserved5[2];
229 unsigned char function_code[2];
230 unsigned char reserved6[2];
231 unsigned int ToCardLen1;
232 unsigned int ToCardLen2;
233 unsigned int ToCardLen3;
234 unsigned int ToCardLen4;
235 unsigned int FromCardLen1;
236 unsigned int FromCardLen2;
237 unsigned int FromCardLen3;
238 unsigned int FromCardLen4;
239};
240
241struct CPRB {
242 unsigned char cprb_len[2];
243 unsigned char cprb_ver_id;
244 unsigned char pad_000;
245 unsigned char srpi_rtcode[4];
246 unsigned char srpi_verb;
247 unsigned char flags;
248 unsigned char func_id[2];
249 unsigned char checkpoint_flag;
250 unsigned char resv2;
251 unsigned char req_parml[2];
252 unsigned char req_parmp[4];
253 unsigned char req_datal[4];
254 unsigned char req_datap[4];
255 unsigned char rpl_parml[2];
256 unsigned char pad_001[2];
257 unsigned char rpl_parmp[4];
258 unsigned char rpl_datal[4];
259 unsigned char rpl_datap[4];
260 unsigned char ccp_rscode[2];
261 unsigned char ccp_rtcode[2];
262 unsigned char repd_parml[2];
263 unsigned char mac_data_len[2];
264 unsigned char repd_datal[4];
265 unsigned char req_pc[2];
266 unsigned char res_origin[8];
267 unsigned char mac_value[8];
268 unsigned char logon_id[8];
269 unsigned char usage_domain[2];
270 unsigned char resv3[18];
271 unsigned char svr_namel[2];
272 unsigned char svr_name[8];
273};
274
275struct type6_msg {
276 struct type6_hdr header;
277 struct CPRB CPRB;
278};
279
280struct type86_hdr {
281 unsigned char reserved1;
282 unsigned char type;
283 unsigned char format;
284 unsigned char reserved2;
285 unsigned char reply_code;
286 unsigned char reserved3[3];
287};
288
289#define TYPE86_RSP_CODE 0x86
290#define TYPE86_FMT2 0x02
291
292struct type86_fmt2_msg {
293 struct type86_hdr header;
294 unsigned char reserved[4];
295 unsigned char apfs[4];
296 unsigned int count1;
297 unsigned int offset1;
298 unsigned int count2;
299 unsigned int offset2;
300 unsigned int count3;
301 unsigned int offset3;
302 unsigned int count4;
303 unsigned int offset4;
304};
305
306static struct type6_hdr static_type6_hdr = {
307 0x00,
308 0x06,
309 {0x00,0x00},
310 {0x00,0x00,0x00,0x00},
311 {0x00,0x00},
312 {0x00,0x00},
313 {0x00,0x00,0x00,0x00},
314 0x00000058,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
319 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
320 {0x00,0x00},
321 {0x00,0x00},
322 {0x50,0x44},
323 {0x00,0x00},
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000
332};
333
334static struct type6_hdr static_type6_hdrX = {
335 0x00,
336 0x06,
337 {0x00,0x00},
338 {0x00,0x00,0x00,0x00},
339 {0x00,0x00},
340 {0x00,0x00},
341 {0x00,0x00,0x00,0x00},
342 0x00000058,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 {0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
347 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
348 {0x00,0x00},
349 {0x00,0x00},
350 {0x50,0x44},
351 {0x00,0x00},
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000
360};
361
362static struct CPRB static_cprb = {
363 {0x70,0x00},
364 0x41,
365 0x00,
366 {0x00,0x00,0x00,0x00},
367 0x00,
368 0x00,
369 {0x54,0x32},
370 0x01,
371 0x00,
372 {0x00,0x00},
373 {0x00,0x00,0x00,0x00},
374 {0x00,0x00,0x00,0x00},
375 {0x00,0x00,0x00,0x00},
376 {0x00,0x00},
377 {0x00,0x00},
378 {0x00,0x00,0x00,0x00},
379 {0x00,0x00,0x00,0x00},
380 {0x00,0x00,0x00,0x00},
381 {0x00,0x00},
382 {0x00,0x00},
383 {0x00,0x00},
384 {0x00,0x00},
385 {0x00,0x00,0x00,0x00},
386 {0x00,0x00},
387 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
388 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
389 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
390 {0x00,0x00},
391 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
392 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
393 0x00,0x00},
394 {0x08,0x00},
395 {0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20}
396};
397
398struct function_and_rules_block {
399 unsigned char function_code[2];
400 unsigned char ulen[2];
401 unsigned char only_rule[8];
402};
403
404static struct function_and_rules_block static_pkd_function_and_rules = {
405 {0x50,0x44},
406 {0x0A,0x00},
407 {'P','K','C','S','-','1','.','2'}
408};
409
410static struct function_and_rules_block static_pke_function_and_rules = {
411 {0x50,0x4B},
412 {0x0A,0x00},
413 {'P','K','C','S','-','1','.','2'}
414};
415
416struct T6_keyBlock_hdr {
417 unsigned char blen[2];
418 unsigned char ulen[2];
419 unsigned char flags[2];
420};
421
422static struct T6_keyBlock_hdr static_T6_keyBlock_hdr = {
423 {0x89,0x01},
424 {0x87,0x01},
425 {0x00}
426};
427
428static struct CPRBX static_cprbx = {
429 0x00DC,
430 0x02,
431 {0x00,0x00,0x00},
432 {0x54,0x32},
433 {0x00,0x00,0x00,0x00},
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 {0x00,0x00,0x00,0x00},
442 0x00000000,
443 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
444 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
445 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
446 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
447 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
448 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
449 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
450 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
451 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
452 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
453 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
454 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
455 0x0000,
456 0x0000,
457 0x00000000,
458 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
459 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
460 0x00,
461 0x00,
462 0x0000,
463 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
464 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
465 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
466 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
467};
468
469static struct function_and_rules_block static_pkd_function_and_rulesX_MCL2 = {
470 {0x50,0x44},
471 {0x00,0x0A},
472 {'P','K','C','S','-','1','.','2'}
473};
474
475static struct function_and_rules_block static_pke_function_and_rulesX_MCL2 = {
476 {0x50,0x4B},
477 {0x00,0x0A},
478 {'Z','E','R','O','-','P','A','D'}
479};
480
481static struct function_and_rules_block static_pkd_function_and_rulesX = {
482 {0x50,0x44},
483 {0x00,0x0A},
484 {'Z','E','R','O','-','P','A','D'}
485};
486
487static struct function_and_rules_block static_pke_function_and_rulesX = {
488 {0x50,0x4B},
489 {0x00,0x0A},
490 {'M','R','P',' ',' ',' ',' ',' '}
491};
492
493static unsigned char static_PKE_function_code[2] = {0x50, 0x4B};
494
495struct T6_keyBlock_hdrX {
496 unsigned short blen;
497 unsigned short ulen;
498 unsigned char flags[2];
499};
500
501static unsigned char static_pad[256] = {
5020x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
5030x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
5040xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
5050x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
5060x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
5070x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
5080x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
5090xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
5100x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
5110x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
5120x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
5130x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
5140x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
5150x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
5160x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
5170x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
518};
519
520static struct cca_private_ext_ME static_pvt_me_key = {
521 {
522 0x1E,
523 0x00,
524 0x0183,
525 {0x00,0x00,0x00,0x00}
526 },
527
528 {
529 0x02,
530 0x00,
531 0x016C,
532 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
533 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
534 0x00,0x00,0x00,0x00},
535 {0x00,0x00,0x00,0x00},
536 0x00,
537 0x00,
538 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
539 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
540 0x00,0x00,0x00,0x00},
541 {0x80,0x00,0x00,0x00},
542 {0x00,0x00,0x00,0x00,0x00,0x00},
543 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
544 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
545 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
546 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
547 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
548 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
549 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
550 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
551 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
552 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
553 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
554 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
555 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
556 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
557 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
558 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
559 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
560 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
561 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
562 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
563 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
564 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
565 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
566 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
567 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
568 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
569 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
570 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
571 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
572 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
573 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
574 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
575 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
576 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
577 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
578 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
579 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
580 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
581 },
582
583 {
584 0x04,
585 0x00,
586 0x000F,
587 {0x00,0x00},
588 0x0003,
589 0x0000,
590 0x0000,
591 {0x01,0x00,0x01}
592 }
593};
594
595static struct cca_public_key static_public_key = {
596 {
597 0x1E,
598 0x00,
599 0x0000,
600 {0x00,0x00,0x00,0x00}
601 },
602
603 {
604 0x04,
605 0x00,
606 0x0000,
607 {0x00,0x00},
608 0x0000,
609 0x0000,
610 0x0000,
611 {0x01,0x00,0x01}
612 }
613};
614
615#define FIXED_TYPE6_ME_LEN 0x0000025F
616
617#define FIXED_TYPE6_ME_EN_LEN 0x000000F0
618
619#define FIXED_TYPE6_ME_LENX 0x000002CB
620
621#define FIXED_TYPE6_ME_EN_LENX 0x0000015C
622
623static struct cca_public_sec static_cca_pub_sec = {
624 0x04,
625 0x00,
626 0x000f,
627 {0x00,0x00},
628 0x0003,
629 0x0000,
630 0x0000,
631 {0x01,0x00,0x01}
632};
633
634#define FIXED_TYPE6_CR_LEN 0x00000177
635
636#define FIXED_TYPE6_CR_LENX 0x000001E3
637
638#define MAX_RESPONSE_SIZE 0x00000710
639
640#define MAX_RESPONSEX_SIZE 0x0000077C
641
642#define RESPONSE_CPRB_SIZE 0x000006B8
643#define RESPONSE_CPRBX_SIZE 0x00000724
644
645struct type50_hdr {
646 u8 reserved1;
647 u8 msg_type_code;
648 u16 msg_len;
649 u8 reserved2;
650 u8 ignored;
651 u16 reserved3;
652};
653
654#define TYPE50_TYPE_CODE 0x50
655
656#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg))
657#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg))
658#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg))
659#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg))
660
661#define TYPE50_MEB1_FMT 0x0001
662#define TYPE50_MEB2_FMT 0x0002
663#define TYPE50_CRB1_FMT 0x0011
664#define TYPE50_CRB2_FMT 0x0012
665
666struct type50_meb1_msg {
667 struct type50_hdr header;
668 u16 keyblock_type;
669 u8 reserved[6];
670 u8 exponent[128];
671 u8 modulus[128];
672 u8 message[128];
673};
674
675struct type50_meb2_msg {
676 struct type50_hdr header;
677 u16 keyblock_type;
678 u8 reserved[6];
679 u8 exponent[256];
680 u8 modulus[256];
681 u8 message[256];
682};
683
684struct type50_crb1_msg {
685 struct type50_hdr header;
686 u16 keyblock_type;
687 u8 reserved[6];
688 u8 p[64];
689 u8 q[64];
690 u8 dp[64];
691 u8 dq[64];
692 u8 u[64];
693 u8 message[128];
694};
695
696struct type50_crb2_msg {
697 struct type50_hdr header;
698 u16 keyblock_type;
699 u8 reserved[6];
700 u8 p[128];
701 u8 q[128];
702 u8 dp[128];
703 u8 dq[128];
704 u8 u[128];
705 u8 message[256];
706};
707
708union type50_msg {
709 struct type50_meb1_msg meb1;
710 struct type50_meb2_msg meb2;
711 struct type50_crb1_msg crb1;
712 struct type50_crb2_msg crb2;
713};
714
715struct type80_hdr {
716 u8 reserved1;
717 u8 type;
718 u16 len;
719 u8 code;
720 u8 reserved2[3];
721 u8 reserved3[8];
722};
723
724#define TYPE80_RSP_CODE 0x80
725
726struct error_hdr {
727 unsigned char reserved1;
728 unsigned char type;
729 unsigned char reserved2[2];
730 unsigned char reply_code;
731 unsigned char reserved3[3];
732};
733
734#define TYPE82_RSP_CODE 0x82
735#define TYPE88_RSP_CODE 0x88
736
737#define REP82_ERROR_MACHINE_FAILURE 0x10
738#define REP82_ERROR_PREEMPT_FAILURE 0x12
739#define REP82_ERROR_CHECKPT_FAILURE 0x14
740#define REP82_ERROR_MESSAGE_TYPE 0x20
741#define REP82_ERROR_INVALID_COMM_CD 0x21
742#define REP82_ERROR_INVALID_MSG_LEN 0x23
743#define REP82_ERROR_RESERVD_FIELD 0x24
744#define REP82_ERROR_FORMAT_FIELD 0x29
745#define REP82_ERROR_INVALID_COMMAND 0x30
746#define REP82_ERROR_MALFORMED_MSG 0x40
747#define REP82_ERROR_RESERVED_FIELDO 0x50
748#define REP82_ERROR_WORD_ALIGNMENT 0x60
749#define REP82_ERROR_MESSAGE_LENGTH 0x80
750#define REP82_ERROR_OPERAND_INVALID 0x82
751#define REP82_ERROR_OPERAND_SIZE 0x84
752#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
753#define REP82_ERROR_RESERVED_FIELD 0x88
754#define REP82_ERROR_TRANSPORT_FAIL 0x90
755#define REP82_ERROR_PACKET_TRUNCATED 0xA0
756#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
757
758#define REP88_ERROR_MODULE_FAILURE 0x10
759#define REP88_ERROR_MODULE_TIMEOUT 0x11
760#define REP88_ERROR_MODULE_NOTINIT 0x13
761#define REP88_ERROR_MODULE_NOTAVAIL 0x14
762#define REP88_ERROR_MODULE_DISABLED 0x15
763#define REP88_ERROR_MODULE_IN_DIAGN 0x17
764#define REP88_ERROR_FASTPATH_DISABLD 0x19
765#define REP88_ERROR_MESSAGE_TYPE 0x20
766#define REP88_ERROR_MESSAGE_MALFORMD 0x22
767#define REP88_ERROR_MESSAGE_LENGTH 0x23
768#define REP88_ERROR_RESERVED_FIELD 0x24
769#define REP88_ERROR_KEY_TYPE 0x34
770#define REP88_ERROR_INVALID_KEY 0x82
771#define REP88_ERROR_OPERAND 0x84
772#define REP88_ERROR_OPERAND_EVEN_MOD 0x85
773
774#define CALLER_HEADER 12
775
776static inline int
777testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
778{
779 int ccode;
780
781 asm volatile
782#ifdef CONFIG_64BIT
783 (" llgfr 0,%4 \n"
784 " slgr 1,1 \n"
785 " lgr 2,1 \n"
786 "0: .long 0xb2af0000 \n"
787 "1: ipm %0 \n"
788 " srl %0,28 \n"
789 " iihh %0,0 \n"
790 " iihl %0,0 \n"
791 " lgr %1,1 \n"
792 " lgr %3,2 \n"
793 " srl %3,24 \n"
794 " sll 2,24 \n"
795 " srl 2,24 \n"
796 " lgr %2,2 \n"
797 "2: \n"
798 ".section .fixup,\"ax\" \n"
799 "3: \n"
800 " lhi %0,%h5 \n"
801 " jg 2b \n"
802 ".previous \n"
803 ".section __ex_table,\"a\" \n"
804 " .align 8 \n"
805 " .quad 0b,3b \n"
806 " .quad 1b,3b \n"
807 ".previous"
808 :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
809 :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
810 :"cc","0","1","2","memory");
811#else
812 (" lr 0,%4 \n"
813 " slr 1,1 \n"
814 " lr 2,1 \n"
815 "0: .long 0xb2af0000 \n"
816 "1: ipm %0 \n"
817 " srl %0,28 \n"
818 " lr %1,1 \n"
819 " lr %3,2 \n"
820 " srl %3,24 \n"
821 " sll 2,24 \n"
822 " srl 2,24 \n"
823 " lr %2,2 \n"
824 "2: \n"
825 ".section .fixup,\"ax\" \n"
826 "3: \n"
827 " lhi %0,%h5 \n"
828 " bras 1,4f \n"
829 " .long 2b \n"
830 "4: \n"
831 " l 1,0(1) \n"
832 " br 1 \n"
833 ".previous \n"
834 ".section __ex_table,\"a\" \n"
835 " .align 4 \n"
836 " .long 0b,3b \n"
837 " .long 1b,3b \n"
838 ".previous"
839 :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
840 :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
841 :"cc","0","1","2","memory");
842#endif
843 return ccode;
844}
845
846static inline int
847resetq(int q_nr, struct ap_status_word *stat_p)
848{
849 int ccode;
850
851 asm volatile
852#ifdef CONFIG_64BIT
853 (" llgfr 0,%2 \n"
854 " lghi 1,1 \n"
855 " sll 1,24 \n"
856 " or 0,1 \n"
857 " slgr 1,1 \n"
858 " lgr 2,1 \n"
859 "0: .long 0xb2af0000 \n"
860 "1: ipm %0 \n"
861 " srl %0,28 \n"
862 " iihh %0,0 \n"
863 " iihl %0,0 \n"
864 " lgr %1,1 \n"
865 "2: \n"
866 ".section .fixup,\"ax\" \n"
867 "3: \n"
868 " lhi %0,%h3 \n"
869 " jg 2b \n"
870 ".previous \n"
871 ".section __ex_table,\"a\" \n"
872 " .align 8 \n"
873 " .quad 0b,3b \n"
874 " .quad 1b,3b \n"
875 ".previous"
876 :"=d" (ccode),"=d" (*stat_p)
877 :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
878 :"cc","0","1","2","memory");
879#else
880 (" lr 0,%2 \n"
881 " lhi 1,1 \n"
882 " sll 1,24 \n"
883 " or 0,1 \n"
884 " slr 1,1 \n"
885 " lr 2,1 \n"
886 "0: .long 0xb2af0000 \n"
887 "1: ipm %0 \n"
888 " srl %0,28 \n"
889 " lr %1,1 \n"
890 "2: \n"
891 ".section .fixup,\"ax\" \n"
892 "3: \n"
893 " lhi %0,%h3 \n"
894 " bras 1,4f \n"
895 " .long 2b \n"
896 "4: \n"
897 " l 1,0(1) \n"
898 " br 1 \n"
899 ".previous \n"
900 ".section __ex_table,\"a\" \n"
901 " .align 4 \n"
902 " .long 0b,3b \n"
903 " .long 1b,3b \n"
904 ".previous"
905 :"=d" (ccode),"=d" (*stat_p)
906 :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
907 :"cc","0","1","2","memory");
908#endif
909 return ccode;
910}
911
912static inline int
913sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
914{
915 int ccode;
916
917 asm volatile
918#ifdef CONFIG_64BIT
919 (" lgr 6,%3 \n"
920 " llgfr 7,%2 \n"
921 " llgt 0,0(6) \n"
922 " lghi 1,64 \n"
923 " sll 1,24 \n"
924 " or 0,1 \n"
925 " la 6,4(6) \n"
926 " llgt 2,0(6) \n"
927 " llgt 3,4(6) \n"
928 " la 6,8(6) \n"
929 " slr 1,1 \n"
930 "0: .long 0xb2ad0026 \n"
931 "1: brc 2,0b \n"
932 " ipm %0 \n"
933 " srl %0,28 \n"
934 " iihh %0,0 \n"
935 " iihl %0,0 \n"
936 " lgr %1,1 \n"
937 "2: \n"
938 ".section .fixup,\"ax\" \n"
939 "3: \n"
940 " lhi %0,%h4 \n"
941 " jg 2b \n"
942 ".previous \n"
943 ".section __ex_table,\"a\" \n"
944 " .align 8 \n"
945 " .quad 0b,3b \n"
946 " .quad 1b,3b \n"
947 ".previous"
948 :"=d" (ccode),"=d" (*stat)
949 :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
950 :"cc","0","1","2","3","6","7","memory");
951#else
952 (" lr 6,%3 \n"
953 " lr 7,%2 \n"
954 " l 0,0(6) \n"
955 " lhi 1,64 \n"
956 " sll 1,24 \n"
957 " or 0,1 \n"
958 " la 6,4(6) \n"
959 " l 2,0(6) \n"
960 " l 3,4(6) \n"
961 " la 6,8(6) \n"
962 " slr 1,1 \n"
963 "0: .long 0xb2ad0026 \n"
964 "1: brc 2,0b \n"
965 " ipm %0 \n"
966 " srl %0,28 \n"
967 " lr %1,1 \n"
968 "2: \n"
969 ".section .fixup,\"ax\" \n"
970 "3: \n"
971 " lhi %0,%h4 \n"
972 " bras 1,4f \n"
973 " .long 2b \n"
974 "4: \n"
975 " l 1,0(1) \n"
976 " br 1 \n"
977 ".previous \n"
978 ".section __ex_table,\"a\" \n"
979 " .align 4 \n"
980 " .long 0b,3b \n"
981 " .long 1b,3b \n"
982 ".previous"
983 :"=d" (ccode),"=d" (*stat)
984 :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
985 :"cc","0","1","2","3","6","7","memory");
986#endif
987 return ccode;
988}
989
990static inline int
991rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
992 struct ap_status_word *st)
993{
994 int ccode;
995
996 asm volatile
997#ifdef CONFIG_64BIT
998 (" llgfr 0,%2 \n"
999 " lgr 3,%4 \n"
1000 " lgr 6,%3 \n"
1001 " llgfr 7,%5 \n"
1002 " lghi 1,128 \n"
1003 " sll 1,24 \n"
1004 " or 0,1 \n"
1005 " slgr 1,1 \n"
1006 " lgr 2,1 \n"
1007 " lgr 4,1 \n"
1008 " lgr 5,1 \n"
1009 "0: .long 0xb2ae0046 \n"
1010 "1: brc 2,0b \n"
1011 " brc 4,0b \n"
1012 " ipm %0 \n"
1013 " srl %0,28 \n"
1014 " iihh %0,0 \n"
1015 " iihl %0,0 \n"
1016 " lgr %1,1 \n"
1017 " st 4,0(3) \n"
1018 " st 5,4(3) \n"
1019 "2: \n"
1020 ".section .fixup,\"ax\" \n"
1021 "3: \n"
1022 " lhi %0,%h6 \n"
1023 " jg 2b \n"
1024 ".previous \n"
1025 ".section __ex_table,\"a\" \n"
1026 " .align 8 \n"
1027 " .quad 0b,3b \n"
1028 " .quad 1b,3b \n"
1029 ".previous"
1030 :"=d"(ccode),"=d"(*st)
1031 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
1032 :"cc","0","1","2","3","4","5","6","7","memory");
1033#else
1034 (" lr 0,%2 \n"
1035 " lr 3,%4 \n"
1036 " lr 6,%3 \n"
1037 " lr 7,%5 \n"
1038 " lhi 1,128 \n"
1039 " sll 1,24 \n"
1040 " or 0,1 \n"
1041 " slr 1,1 \n"
1042 " lr 2,1 \n"
1043 " lr 4,1 \n"
1044 " lr 5,1 \n"
1045 "0: .long 0xb2ae0046 \n"
1046 "1: brc 2,0b \n"
1047 " brc 4,0b \n"
1048 " ipm %0 \n"
1049 " srl %0,28 \n"
1050 " lr %1,1 \n"
1051 " st 4,0(3) \n"
1052 " st 5,4(3) \n"
1053 "2: \n"
1054 ".section .fixup,\"ax\" \n"
1055 "3: \n"
1056 " lhi %0,%h6 \n"
1057 " bras 1,4f \n"
1058 " .long 2b \n"
1059 "4: \n"
1060 " l 1,0(1) \n"
1061 " br 1 \n"
1062 ".previous \n"
1063 ".section __ex_table,\"a\" \n"
1064 " .align 4 \n"
1065 " .long 0b,3b \n"
1066 " .long 1b,3b \n"
1067 ".previous"
1068 :"=d"(ccode),"=d"(*st)
1069 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
1070 :"cc","0","1","2","3","4","5","6","7","memory");
1071#endif
1072 return ccode;
1073}
1074
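A hedged illustration (not part of the original file) of how the AP queue number passed to the testq/resetq/sen/rec primitives above is composed: the device index is shifted left by SKIP_BITL (4 + 4 = 8 bits here) and the crypto domain index is added in the low byte.

#define AP_MAX_CDX_BITL		4
#define AP_RQID_RESERVED_BITL	4
#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL)

static inline int ap_queue_number(int dev_nr, int cdx)
{
	/* e.g. device 5, domain 6 -> (5 << 8) + 6 = 0x0506 */
	return (dev_nr << SKIP_BITL) + cdx;
}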
1075static inline void
1076itoLe2(int *i_p, unsigned char *lechars)
1077{
1078 *lechars = *((unsigned char *) i_p + sizeof(int) - 1);
1079 *(lechars + 1) = *((unsigned char *) i_p + sizeof(int) - 2);
1080}
1081
1082static inline void
1083le2toI(unsigned char *lechars, int *i_p)
1084{
1085 unsigned char *ic_p;
1086 *i_p = 0;
1087 ic_p = (unsigned char *) i_p;
1088 *(ic_p + 2) = *(lechars + 1);
1089 *(ic_p + 3) = *(lechars);
1090}
1091
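The two helpers above convert between a host integer (big-endian on s390) and the 2-byte little-endian length fields used in the CPRB. The following portable equivalents are a sketch for illustration only, assuming the values fit in 16 bits.

static void int_to_le16(int value, unsigned char *lechars)
{
	lechars[0] = value & 0xff;		/* least significant byte first */
	lechars[1] = (value >> 8) & 0xff;
}

static int le16_to_int(const unsigned char *lechars)
{
	return lechars[0] | (lechars[1] << 8);
}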
1092static inline int
1093is_empty(unsigned char *ptr, int len)
1094{
1095 return !memcmp(ptr, (unsigned char *) &static_pvt_me_key+60, len);
1096}
1097
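Offset 60 into static_pvt_me_key (defined further up) points into a long run of zero bytes in its initializer, so is_empty() in effect reports whether a user-supplied operand is all zeros. An equivalent spelled out directly, for illustration only:

static inline int is_all_zero(const unsigned char *ptr, int len)
{
	while (len-- > 0)
		if (*ptr++)
			return 0;
	return 1;
}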
1098enum hdstat
1099query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1100{
1101 int q_nr, i, t_depth, t_dev_type;
1102 enum devstat ccode;
1103 struct ap_status_word stat_word;
1104 enum hdstat stat;
1105 int break_out;
1106
1107 q_nr = (deviceNr << SKIP_BITL) + cdx;
1108 stat = HD_BUSY;
1109 ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
1110 PDEBUG("ccode %d response_code %02X\n", ccode, stat_word.response_code);
1111 break_out = 0;
1112 for (i = 0; i < resetNr; i++) {
1113 if (ccode > 3) {
1114 PRINTKC("Exception testing device %d\n", i);
1115 return HD_TSQ_EXCEPTION;
1116 }
1117 switch (ccode) {
1118 case 0:
1119 PDEBUG("t_dev_type %d\n", t_dev_type);
1120 break_out = 1;
1121 stat = HD_ONLINE;
1122 *q_depth = t_depth + 1;
1123 switch (t_dev_type) {
1124 case PCICA_HW:
1125 *dev_type = PCICA;
1126 break;
1127 case PCICC_HW:
1128 *dev_type = PCICC;
1129 break;
1130 case PCIXCC_HW:
1131 *dev_type = PCIXCC_UNK;
1132 break;
1133 case CEX2C_HW:
1134 *dev_type = CEX2C;
1135 break;
1136 case CEX2A_HW:
1137 *dev_type = CEX2A;
1138 break;
1139 default:
1140 *dev_type = NILDEV;
1141 break;
1142 }
1143 PDEBUG("available device %d: Q depth = %d, dev "
1144 "type = %d, stat = %02X%02X%02X%02X\n",
1145 deviceNr, *q_depth, *dev_type,
1146 stat_word.q_stat_flags,
1147 stat_word.response_code,
1148 stat_word.reserved[0],
1149 stat_word.reserved[1]);
1150 break;
1151 case 3:
1152 switch (stat_word.response_code) {
1153 case AP_RESPONSE_NORMAL:
1154 stat = HD_ONLINE;
1155 break_out = 1;
1156 *q_depth = t_depth + 1;
1157 *dev_type = t_dev_type;
1158 PDEBUG("cc3, available device "
1159 "%d: Q depth = %d, dev "
1160 "type = %d, stat = "
1161 "%02X%02X%02X%02X\n",
1162 deviceNr, *q_depth,
1163 *dev_type,
1164 stat_word.q_stat_flags,
1165 stat_word.response_code,
1166 stat_word.reserved[0],
1167 stat_word.reserved[1]);
1168 break;
1169 case AP_RESPONSE_Q_NOT_AVAIL:
1170 stat = HD_NOT_THERE;
1171 break_out = 1;
1172 break;
1173 case AP_RESPONSE_RESET_IN_PROGRESS:
1174 PDEBUG("device %d in reset\n",
1175 deviceNr);
1176 break;
1177 case AP_RESPONSE_DECONFIGURED:
1178 stat = HD_DECONFIGURED;
1179 break_out = 1;
1180 break;
1181 case AP_RESPONSE_CHECKSTOPPED:
1182 stat = HD_CHECKSTOPPED;
1183 break_out = 1;
1184 break;
1185 case AP_RESPONSE_BUSY:
1186 PDEBUG("device %d busy\n",
1187 deviceNr);
1188 break;
1189 default:
1190 break;
1191 }
1192 break;
1193 default:
1194 stat = HD_NOT_THERE;
1195 break_out = 1;
1196 break;
1197 }
1198 if (break_out)
1199 break;
1200
1201 udelay(5);
1202
1203 ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
1204 }
1205 return stat;
1206}
1207
1208enum devstat
1209reset_device(int deviceNr, int cdx, int resetNr)
1210{
1211 int q_nr, ccode = 0, dummy_qdepth, dummy_devType, i;
1212 struct ap_status_word stat_word;
1213 enum devstat stat;
1214 int break_out;
1215
1216 q_nr = (deviceNr << SKIP_BITL) + cdx;
1217 stat = DEV_GONE;
1218 ccode = resetq(q_nr, &stat_word);
1219 if (ccode > 3)
1220 return DEV_RSQ_EXCEPTION;
1221
1222 break_out = 0;
1223 for (i = 0; i < resetNr; i++) {
1224 switch (ccode) {
1225 case 0:
1226 stat = DEV_ONLINE;
1227 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1228 break_out = 1;
1229 break;
1230 case 3:
1231 switch (stat_word.response_code) {
1232 case AP_RESPONSE_NORMAL:
1233 stat = DEV_ONLINE;
1234 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1235 break_out = 1;
1236 break;
1237 case AP_RESPONSE_Q_NOT_AVAIL:
1238 case AP_RESPONSE_DECONFIGURED:
1239 case AP_RESPONSE_CHECKSTOPPED:
1240 stat = DEV_GONE;
1241 break_out = 1;
1242 break;
1243 case AP_RESPONSE_RESET_IN_PROGRESS:
1244 case AP_RESPONSE_BUSY:
1245 default:
1246 break;
1247 }
1248 break;
1249 default:
1250 stat = DEV_GONE;
1251 break_out = 1;
1252 break;
1253 }
1254 if (break_out == 1)
1255 break;
1256 udelay(5);
1257
1258 ccode = testq(q_nr, &dummy_qdepth, &dummy_devType, &stat_word);
1259 if (ccode > 3) {
1260 stat = DEV_TSQ_EXCEPTION;
1261 break;
1262 }
1263 }
1264 PDEBUG("Number of testq's needed for reset: %d\n", i);
1265
1266 if (i >= resetNr) {
1267 stat = DEV_GONE;
1268 }
1269
1270 return stat;
1271}
1272
1273#ifdef DEBUG_HYDRA_MSGS
1274static inline void
1275print_buffer(unsigned char *buffer, int bufflen)
1276{
1277 int i;
1278 for (i = 0; i < bufflen; i += 16) {
1279 PRINTK("%04X: %02X%02X%02X%02X %02X%02X%02X%02X "
1280 "%02X%02X%02X%02X %02X%02X%02X%02X\n", i,
1281 buffer[i+0], buffer[i+1], buffer[i+2], buffer[i+3],
1282 buffer[i+4], buffer[i+5], buffer[i+6], buffer[i+7],
1283 buffer[i+8], buffer[i+9], buffer[i+10], buffer[i+11],
1284 buffer[i+12], buffer[i+13], buffer[i+14], buffer[i+15]);
1285 }
1286}
1287#endif
1288
1289enum devstat
1290send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext)
1291{
1292 struct ap_status_word stat_word;
1293 enum devstat stat;
1294 int ccode;
1295 u32 *q_nr_p = (u32 *)msg_ext;
1296
1297 *q_nr_p = (dev_nr << SKIP_BITL) + cdx;
1298 PDEBUG("msg_len passed to sen: %d\n", msg_len);
1299 PDEBUG("q number passed to sen: %02x%02x%02x%02x\n",
1300 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]);
1301 stat = DEV_GONE;
1302
1303#ifdef DEBUG_HYDRA_MSGS
1304 PRINTK("Request header: %02X%02X%02X%02X %02X%02X%02X%02X "
1305 "%02X%02X%02X%02X\n",
1306 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3],
1307 msg_ext[4], msg_ext[5], msg_ext[6], msg_ext[7],
1308 msg_ext[8], msg_ext[9], msg_ext[10], msg_ext[11]);
1309 print_buffer(msg_ext+CALLER_HEADER, msg_len);
1310#endif
1311
1312 ccode = sen(msg_len, msg_ext, &stat_word);
1313 if (ccode > 3)
1314 return DEV_SEN_EXCEPTION;
1315
1316 PDEBUG("nq cc: %u, st: %02x%02x%02x%02x\n",
1317 ccode, stat_word.q_stat_flags, stat_word.response_code,
1318 stat_word.reserved[0], stat_word.reserved[1]);
1319 switch (ccode) {
1320 case 0:
1321 stat = DEV_ONLINE;
1322 break;
1323 case 1:
1324 stat = DEV_GONE;
1325 break;
1326 case 3:
1327 switch (stat_word.response_code) {
1328 case AP_RESPONSE_NORMAL:
1329 stat = DEV_ONLINE;
1330 break;
1331 case AP_RESPONSE_Q_FULL:
1332 stat = DEV_QUEUE_FULL;
1333 break;
1334 default:
1335 stat = DEV_GONE;
1336 break;
1337 }
1338 break;
1339 default:
1340 stat = DEV_GONE;
1341 break;
1342 }
1343
1344 return stat;
1345}
1346
1347enum devstat
1348receive_from_AP(int dev_nr, int cdx, int resplen, unsigned char *resp,
1349 unsigned char *psmid)
1350{
1351 int ccode;
1352 struct ap_status_word stat_word;
1353 enum devstat stat;
1354
1355 memset(resp, 0x00, 8);
1356
1357 ccode = rec((dev_nr << SKIP_BITL) + cdx, resplen, resp, psmid,
1358 &stat_word);
1359 if (ccode > 3)
1360 return DEV_REC_EXCEPTION;
1361
1362 PDEBUG("dq cc: %u, st: %02x%02x%02x%02x\n",
1363 ccode, stat_word.q_stat_flags, stat_word.response_code,
1364 stat_word.reserved[0], stat_word.reserved[1]);
1365
1366 stat = DEV_GONE;
1367 switch (ccode) {
1368 case 0:
1369 stat = DEV_ONLINE;
1370#ifdef DEBUG_HYDRA_MSGS
1371 print_buffer(resp, resplen);
1372#endif
1373 break;
1374 case 3:
1375 switch (stat_word.response_code) {
1376 case AP_RESPONSE_NORMAL:
1377 stat = DEV_ONLINE;
1378 break;
1379 case AP_RESPONSE_NO_PENDING_REPLY:
1380 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1381 stat = DEV_EMPTY;
1382 else
1383 stat = DEV_NO_WORK;
1384 break;
1385 case AP_RESPONSE_INDEX_TOO_BIG:
1386 case AP_RESPONSE_NO_FIRST_PART:
1387 case AP_RESPONSE_MESSAGE_TOO_BIG:
1388 stat = DEV_BAD_MESSAGE;
1389 break;
1390 default:
1391 break;
1392 }
1393 break;
1394 default:
1395 break;
1396 }
1397
1398 return stat;
1399}
1400
1401static inline int
1402pad_msg(unsigned char *buffer, int totalLength, int msgLength)
1403{
1404 int pad_len;
1405
1406 for (pad_len = 0; pad_len < (totalLength - msgLength); pad_len++)
1407 if (buffer[pad_len] != 0x00)
1408 break;
1409 pad_len -= 3;
1410 if (pad_len < 8)
1411 return SEN_PAD_ERROR;
1412
1413 buffer[0] = 0x00;
1414 buffer[1] = 0x02;
1415
1416 memcpy(buffer+2, static_pad, pad_len);
1417
1418 buffer[pad_len + 2] = 0x00;
1419
1420 return 0;
1421}
1422
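A sketch of what pad_msg() above leaves in the leading, previously zeroed part of the buffer (an assumption drawn from the code; pad_block() and its parameters are invented for illustration): a 0x00 0x02 prefix, pad_len bytes copied from static_pad[], and a single 0x00 separator in front of the caller's data.

#include <string.h>

static int pad_block(unsigned char *buf, int total_len, int msg_len,
		     const unsigned char *pad_src)
{
	int pad_len = total_len - msg_len - 3;	/* 0x00, 0x02, ..., 0x00 */

	if (pad_len < 8)			/* same minimum as pad_msg() */
		return -1;
	buf[0] = 0x00;
	buf[1] = 0x02;
	memcpy(buf + 2, pad_src, pad_len);
	buf[pad_len + 2] = 0x00;
	return 0;
}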
1423static inline int
1424is_common_public_key(unsigned char *key, int len)
1425{
1426 int i;
1427
1428 for (i = 0; i < len; i++)
1429 if (key[i])
1430 break;
1431 key += i;
1432 len -= i;
1433 if (((len == 1) && (key[0] == 3)) ||
1434 ((len == 3) && (key[0] == 1) && (key[1] == 0) && (key[2] == 1)))
1435 return 1;
1436
1437 return 0;
1438}
1439
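After skipping leading zero bytes, is_common_public_key() above accepts exactly the two customary RSA public exponents. Illustrative encodings (not from the original source) that would satisfy the check:

static const unsigned char exp_3[4]     = { 0x00, 0x00, 0x00, 0x03 }; /* e = 3 */
static const unsigned char exp_65537[4] = { 0x00, 0x01, 0x00, 0x01 }; /* e = 65537 */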
1440static int
1441ICAMEX_msg_to_type4MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
1442 union type4_msg *z90cMsg_p)
1443{
1444 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
1445 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
1446 union type4_msg *tmp_type4_msg;
1447
1448 mod_len = icaMex_p->inputdatalength;
1449
1450 msg_size = ((mod_len <= 128) ? TYPE4_SME_LEN : TYPE4_LME_LEN) +
1451 CALLER_HEADER;
1452
1453 memset(z90cMsg_p, 0, msg_size);
1454
1455 tmp_type4_msg = (union type4_msg *)
1456 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
1457
1458 tmp_type4_msg->sme.header.msg_type_code = TYPE4_TYPE_CODE;
1459 tmp_type4_msg->sme.header.request_code = TYPE4_REQU_CODE;
1460
1461 if (mod_len <= 128) {
1462 tmp_type4_msg->sme.header.msg_fmt = TYPE4_SME_FMT;
1463 tmp_type4_msg->sme.header.msg_len = TYPE4_SME_LEN;
1464 mod_tgt = tmp_type4_msg->sme.modulus;
1465 mod_tgt_len = sizeof(tmp_type4_msg->sme.modulus);
1466 exp_tgt = tmp_type4_msg->sme.exponent;
1467 exp_tgt_len = sizeof(tmp_type4_msg->sme.exponent);
1468 inp_tgt = tmp_type4_msg->sme.message;
1469 inp_tgt_len = sizeof(tmp_type4_msg->sme.message);
1470 } else {
1471 tmp_type4_msg->lme.header.msg_fmt = TYPE4_LME_FMT;
1472 tmp_type4_msg->lme.header.msg_len = TYPE4_LME_LEN;
1473 mod_tgt = tmp_type4_msg->lme.modulus;
1474 mod_tgt_len = sizeof(tmp_type4_msg->lme.modulus);
1475 exp_tgt = tmp_type4_msg->lme.exponent;
1476 exp_tgt_len = sizeof(tmp_type4_msg->lme.exponent);
1477 inp_tgt = tmp_type4_msg->lme.message;
1478 inp_tgt_len = sizeof(tmp_type4_msg->lme.message);
1479 }
1480
1481 mod_tgt += (mod_tgt_len - mod_len);
1482 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
1483 return SEN_RELEASED;
1484 if (is_empty(mod_tgt, mod_len))
1485 return SEN_USER_ERROR;
1486 exp_tgt += (exp_tgt_len - mod_len);
1487 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
1488 return SEN_RELEASED;
1489 if (is_empty(exp_tgt, mod_len))
1490 return SEN_USER_ERROR;
1491 inp_tgt += (inp_tgt_len - mod_len);
1492 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
1493 return SEN_RELEASED;
1494 if (is_empty(inp_tgt, mod_len))
1495 return SEN_USER_ERROR;
1496
1497 *z90cMsg_l_p = msg_size - CALLER_HEADER;
1498
1499 return 0;
1500}
1501
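The converter above (and the ones that follow) all use the same pattern: clear the whole message, then copy each user operand flush right into its fixed-width hardware field so that shorter values keep their leading zero bytes. A standalone sketch of that pattern, with the function name and error value invented here:

#include <string.h>

static int copy_right_justified(unsigned char *field, int field_len,
				const unsigned char *src, int src_len)
{
	if (src_len > field_len)
		return -1;
	/* leading bytes stay zero because the message was memset() first */
	memcpy(field + (field_len - src_len), src, src_len);
	return 0;
}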
1502static int
1503ICACRT_msg_to_type4CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
1504 int *z90cMsg_l_p, union type4_msg *z90cMsg_p)
1505{
1506 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
1507 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len;
1508 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt;
1509 union type4_msg *tmp_type4_msg;
1510
1511 mod_len = icaMsg_p->inputdatalength;
1512 short_len = mod_len / 2;
1513 long_len = mod_len / 2 + 8;
1514
1515 tmp_size = ((mod_len <= 128) ? TYPE4_SCR_LEN : TYPE4_LCR_LEN) +
1516 CALLER_HEADER;
1517
1518 memset(z90cMsg_p, 0, tmp_size);
1519
1520 tmp_type4_msg = (union type4_msg *)
1521 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
1522
1523 tmp_type4_msg->scr.header.msg_type_code = TYPE4_TYPE_CODE;
1524 tmp_type4_msg->scr.header.request_code = TYPE4_REQU_CODE;
1525 if (mod_len <= 128) {
1526 tmp_type4_msg->scr.header.msg_fmt = TYPE4_SCR_FMT;
1527 tmp_type4_msg->scr.header.msg_len = TYPE4_SCR_LEN;
1528 p_tgt = tmp_type4_msg->scr.p;
1529 p_tgt_len = sizeof(tmp_type4_msg->scr.p);
1530 q_tgt = tmp_type4_msg->scr.q;
1531 q_tgt_len = sizeof(tmp_type4_msg->scr.q);
1532 dp_tgt = tmp_type4_msg->scr.dp;
1533 dp_tgt_len = sizeof(tmp_type4_msg->scr.dp);
1534 dq_tgt = tmp_type4_msg->scr.dq;
1535 dq_tgt_len = sizeof(tmp_type4_msg->scr.dq);
1536 u_tgt = tmp_type4_msg->scr.u;
1537 u_tgt_len = sizeof(tmp_type4_msg->scr.u);
1538 inp_tgt = tmp_type4_msg->scr.message;
1539 inp_tgt_len = sizeof(tmp_type4_msg->scr.message);
1540 } else {
1541 tmp_type4_msg->lcr.header.msg_fmt = TYPE4_LCR_FMT;
1542 tmp_type4_msg->lcr.header.msg_len = TYPE4_LCR_LEN;
1543 p_tgt = tmp_type4_msg->lcr.p;
1544 p_tgt_len = sizeof(tmp_type4_msg->lcr.p);
1545 q_tgt = tmp_type4_msg->lcr.q;
1546 q_tgt_len = sizeof(tmp_type4_msg->lcr.q);
1547 dp_tgt = tmp_type4_msg->lcr.dp;
1548 dp_tgt_len = sizeof(tmp_type4_msg->lcr.dp);
1549 dq_tgt = tmp_type4_msg->lcr.dq;
1550 dq_tgt_len = sizeof(tmp_type4_msg->lcr.dq);
1551 u_tgt = tmp_type4_msg->lcr.u;
1552 u_tgt_len = sizeof(tmp_type4_msg->lcr.u);
1553 inp_tgt = tmp_type4_msg->lcr.message;
1554 inp_tgt_len = sizeof(tmp_type4_msg->lcr.message);
1555 }
1556
1557 p_tgt += (p_tgt_len - long_len);
1558 if (copy_from_user(p_tgt, icaMsg_p->np_prime, long_len))
1559 return SEN_RELEASED;
1560 if (is_empty(p_tgt, long_len))
1561 return SEN_USER_ERROR;
1562 q_tgt += (q_tgt_len - short_len);
1563 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
1564 return SEN_RELEASED;
1565 if (is_empty(q_tgt, short_len))
1566 return SEN_USER_ERROR;
1567 dp_tgt += (dp_tgt_len - long_len);
1568 if (copy_from_user(dp_tgt, icaMsg_p->bp_key, long_len))
1569 return SEN_RELEASED;
1570 if (is_empty(dp_tgt, long_len))
1571 return SEN_USER_ERROR;
1572 dq_tgt += (dq_tgt_len - short_len);
1573 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
1574 return SEN_RELEASED;
1575 if (is_empty(dq_tgt, short_len))
1576 return SEN_USER_ERROR;
1577 u_tgt += (u_tgt_len - long_len);
1578 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv, long_len))
1579 return SEN_RELEASED;
1580 if (is_empty(u_tgt, long_len))
1581 return SEN_USER_ERROR;
1582 inp_tgt += (inp_tgt_len - mod_len);
1583 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
1584 return SEN_RELEASED;
1585 if (is_empty(inp_tgt, mod_len))
1586 return SEN_USER_ERROR;
1587
1588 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1589
1590 return 0;
1591}
1592
1593static int
1594ICAMEX_msg_to_type6MEX_de_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1595 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1596{
1597 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
1598 unsigned char *temp;
1599 struct type6_hdr *tp6Hdr_p;
1600 struct CPRB *cprb_p;
1601 struct cca_private_ext_ME *key_p;
1602 static int deprecated_msg_count = 0;
1603
1604 mod_len = icaMsg_p->inputdatalength;
1605 tmp_size = FIXED_TYPE6_ME_LEN + mod_len;
1606 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1607 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1608 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1609
1610 memset(z90cMsg_p, 0, tmp_size);
1611
1612 temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1613 memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
1614 tp6Hdr_p = (struct type6_hdr *)temp;
1615 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1616 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1617
1618 temp += sizeof(struct type6_hdr);
1619 memcpy(temp, &static_cprb, sizeof(struct CPRB));
1620 cprb_p = (struct CPRB *) temp;
1621 cprb_p->usage_domain[0]= (unsigned char)cdx;
1622 itoLe2(&parmBlock_l, cprb_p->req_parml);
1623 itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
1624
1625 temp += sizeof(struct CPRB);
1626 memcpy(temp, &static_pkd_function_and_rules,
1627 sizeof(struct function_and_rules_block));
1628
1629 temp += sizeof(struct function_and_rules_block);
1630 vud_len = 2 + icaMsg_p->inputdatalength;
1631 itoLe2(&vud_len, temp);
1632
1633 temp += 2;
1634 if (copy_from_user(temp, icaMsg_p->inputdata, mod_len))
1635 return SEN_RELEASED;
1636 if (is_empty(temp, mod_len))
1637 return SEN_USER_ERROR;
1638
1639 temp += mod_len;
1640 memcpy(temp, &static_T6_keyBlock_hdr, sizeof(struct T6_keyBlock_hdr));
1641
1642 temp += sizeof(struct T6_keyBlock_hdr);
1643 memcpy(temp, &static_pvt_me_key, sizeof(struct cca_private_ext_ME));
1644 key_p = (struct cca_private_ext_ME *)temp;
1645 temp = key_p->pvtMESec.exponent + sizeof(key_p->pvtMESec.exponent)
1646 - mod_len;
1647 if (copy_from_user(temp, icaMsg_p->b_key, mod_len))
1648 return SEN_RELEASED;
1649 if (is_empty(temp, mod_len))
1650 return SEN_USER_ERROR;
1651
1652 if (is_common_public_key(temp, mod_len)) {
1653 if (deprecated_msg_count < 20) {
1654 PRINTK("Common public key used for modex decrypt\n");
1655 deprecated_msg_count++;
1656 if (deprecated_msg_count == 20)
1657 PRINTK("No longer issuing messages about common"
1658 " public key for modex decrypt.\n");
1659 }
1660 return SEN_NOT_AVAIL;
1661 }
1662
1663 temp = key_p->pvtMESec.modulus + sizeof(key_p->pvtMESec.modulus)
1664 - mod_len;
1665 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1666 return SEN_RELEASED;
1667 if (is_empty(temp, mod_len))
1668 return SEN_USER_ERROR;
1669
1670 key_p->pubMESec.modulus_bit_len = 8 * mod_len;
1671
1672 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1673
1674 return 0;
1675}
1676
1677static int
1678ICAMEX_msg_to_type6MEX_en_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1679 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1680{
1681 int mod_len, vud_len, exp_len, key_len;
1682 int pad_len, tmp_size, total_CPRB_len, parmBlock_l, i;
1683 unsigned char *temp_exp, *exp_p, *temp;
1684 struct type6_hdr *tp6Hdr_p;
1685 struct CPRB *cprb_p;
1686 struct cca_public_key *key_p;
1687 struct T6_keyBlock_hdr *keyb_p;
1688
1689 temp_exp = kmalloc(256, GFP_KERNEL);
1690 if (!temp_exp)
1691 return EGETBUFF;
1692 mod_len = icaMsg_p->inputdatalength;
1693 if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
1694 kfree(temp_exp);
1695 return SEN_RELEASED;
1696 }
1697 if (is_empty(temp_exp, mod_len)) {
1698 kfree(temp_exp);
1699 return SEN_USER_ERROR;
1700 }
1701
1702 exp_p = temp_exp;
1703 for (i = 0; i < mod_len; i++)
1704 if (exp_p[i])
1705 break;
1706 if (i >= mod_len) {
1707 kfree(temp_exp);
1708 return SEN_USER_ERROR;
1709 }
1710
1711 exp_len = mod_len - i;
1712 exp_p += i;
1713
1714 PDEBUG("exp_len after computation: %08x\n", exp_len);
1715 tmp_size = FIXED_TYPE6_ME_EN_LEN + 2 * mod_len + exp_len;
1716 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1717 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1718 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1719
1720 vud_len = 2 + mod_len;
1721 memset(z90cMsg_p, 0, tmp_size);
1722
1723 temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1724 memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
1725 tp6Hdr_p = (struct type6_hdr *)temp;
1726 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1727 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1728 memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
1729 sizeof(static_PKE_function_code));
1730 temp += sizeof(struct type6_hdr);
1731 memcpy(temp, &static_cprb, sizeof(struct CPRB));
1732 cprb_p = (struct CPRB *) temp;
1733 cprb_p->usage_domain[0]= (unsigned char)cdx;
1734 itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
1735 temp += sizeof(struct CPRB);
1736 memcpy(temp, &static_pke_function_and_rules,
1737 sizeof(struct function_and_rules_block));
1738 temp += sizeof(struct function_and_rules_block);
1739 temp += 2;
1740 if (copy_from_user(temp, icaMsg_p->inputdata, mod_len)) {
1741 kfree(temp_exp);
1742 return SEN_RELEASED;
1743 }
1744 if (is_empty(temp, mod_len)) {
1745 kfree(temp_exp);
1746 return SEN_USER_ERROR;
1747 }
1748 if ((temp[0] != 0x00) || (temp[1] != 0x02)) {
1749 kfree(temp_exp);
1750 return SEN_NOT_AVAIL;
1751 }
1752 for (i = 2; i < mod_len; i++)
1753 if (temp[i] == 0x00)
1754 break;
1755 if ((i < 9) || (i > (mod_len - 2))) {
1756 kfree(temp_exp);
1757 return SEN_NOT_AVAIL;
1758 }
1759 pad_len = i + 1;
1760 vud_len = mod_len - pad_len;
1761 memmove(temp, temp+pad_len, vud_len);
1762 temp -= 2;
1763 vud_len += 2;
1764 itoLe2(&vud_len, temp);
1765 temp += (vud_len);
1766 keyb_p = (struct T6_keyBlock_hdr *)temp;
1767 temp += sizeof(struct T6_keyBlock_hdr);
1768 memcpy(temp, &static_public_key, sizeof(static_public_key));
1769 key_p = (struct cca_public_key *)temp;
1770 temp = key_p->pubSec.exponent;
1771 memcpy(temp, exp_p, exp_len);
1772 kfree(temp_exp);
1773 temp += exp_len;
1774 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1775 return SEN_RELEASED;
1776 if (is_empty(temp, mod_len))
1777 return SEN_USER_ERROR;
1778 key_p->pubSec.modulus_bit_len = 8 * mod_len;
1779 key_p->pubSec.modulus_byte_len = mod_len;
1780 key_p->pubSec.exponent_len = exp_len;
1781 key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
1782 key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
1783 key_p->pubHdr.token_length = key_len;
1784 key_len += 4;
1785 itoLe2(&key_len, keyb_p->ulen);
1786 key_len += 2;
1787 itoLe2(&key_len, keyb_p->blen);
1788 parmBlock_l -= pad_len;
1789 itoLe2(&parmBlock_l, cprb_p->req_parml);
1790 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1791
1792 return 0;
1793}
1794
1795static int
1796ICACRT_msg_to_type6CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
1797 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1798{
1799 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
1800 int long_len, pad_len, keyPartsLen, tmp_l;
1801 unsigned char *tgt_p, *temp;
1802 struct type6_hdr *tp6Hdr_p;
1803 struct CPRB *cprb_p;
1804 struct cca_token_hdr *keyHdr_p;
1805 struct cca_pvt_ext_CRT_sec *pvtSec_p;
1806 struct cca_public_sec *pubSec_p;
1807
1808 mod_len = icaMsg_p->inputdatalength;
1809 short_len = mod_len / 2;
1810 long_len = 8 + short_len;
1811 keyPartsLen = 3 * long_len + 2 * short_len;
1812 pad_len = (8 - (keyPartsLen % 8)) % 8;
1813 keyPartsLen += pad_len + mod_len;
1814 tmp_size = FIXED_TYPE6_CR_LEN + keyPartsLen + mod_len;
1815 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1816 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1817 vud_len = 2 + mod_len;
1818 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1819
1820 memset(z90cMsg_p, 0, tmp_size);
1821 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1822 memcpy(tgt_p, &static_type6_hdr, sizeof(struct type6_hdr));
1823 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1824 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1825 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1826 tgt_p += sizeof(struct type6_hdr);
1827 cprb_p = (struct CPRB *) tgt_p;
1828 memcpy(tgt_p, &static_cprb, sizeof(struct CPRB));
1829 cprb_p->usage_domain[0]= *((unsigned char *)(&(cdx))+3);
1830 itoLe2(&parmBlock_l, cprb_p->req_parml);
1831 memcpy(cprb_p->rpl_parml, cprb_p->req_parml,
1832 sizeof(cprb_p->req_parml));
1833 tgt_p += sizeof(struct CPRB);
1834 memcpy(tgt_p, &static_pkd_function_and_rules,
1835 sizeof(struct function_and_rules_block));
1836 tgt_p += sizeof(struct function_and_rules_block);
1837 itoLe2(&vud_len, tgt_p);
1838 tgt_p += 2;
1839 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
1840 return SEN_RELEASED;
1841 if (is_empty(tgt_p, mod_len))
1842 return SEN_USER_ERROR;
1843 tgt_p += mod_len;
1844 tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
1845 sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
1846 itoLe2(&tmp_l, tgt_p);
1847 temp = tgt_p + 2;
1848 tmp_l -= 2;
1849 itoLe2(&tmp_l, temp);
1850 tgt_p += sizeof(struct T6_keyBlock_hdr);
1851 keyHdr_p = (struct cca_token_hdr *)tgt_p;
1852 keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
1853 tmp_l -= 4;
1854 keyHdr_p->token_length = tmp_l;
1855 tgt_p += sizeof(struct cca_token_hdr);
1856 pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
1857 pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
1858 pvtSec_p->section_length =
1859 sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
1860 pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
1861 pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
1862 pvtSec_p->p_len = long_len;
1863 pvtSec_p->q_len = short_len;
1864 pvtSec_p->dp_len = long_len;
1865 pvtSec_p->dq_len = short_len;
1866 pvtSec_p->u_len = long_len;
1867 pvtSec_p->mod_len = mod_len;
1868 pvtSec_p->pad_len = pad_len;
1869 tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
1870 if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
1871 return SEN_RELEASED;
1872 if (is_empty(tgt_p, long_len))
1873 return SEN_USER_ERROR;
1874 tgt_p += long_len;
1875 if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
1876 return SEN_RELEASED;
1877 if (is_empty(tgt_p, short_len))
1878 return SEN_USER_ERROR;
1879 tgt_p += short_len;
1880 if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
1881 return SEN_RELEASED;
1882 if (is_empty(tgt_p, long_len))
1883 return SEN_USER_ERROR;
1884 tgt_p += long_len;
1885 if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
1886 return SEN_RELEASED;
1887 if (is_empty(tgt_p, short_len))
1888 return SEN_USER_ERROR;
1889 tgt_p += short_len;
1890 if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
1891 return SEN_RELEASED;
1892 if (is_empty(tgt_p, long_len))
1893 return SEN_USER_ERROR;
1894 tgt_p += long_len;
1895 tgt_p += pad_len;
1896 memset(tgt_p, 0xFF, mod_len);
1897 tgt_p += mod_len;
1898 memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
1899 pubSec_p = (struct cca_public_sec *) tgt_p;
1900 pubSec_p->modulus_bit_len = 8 * mod_len;
1901 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1902
1903 return 0;
1904}
1905
1906static int
1907ICAMEX_msg_to_type6MEX_msgX(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1908 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
1909 int dev_type)
1910{
1911 int mod_len, exp_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
1912 int key_len, i;
1913 unsigned char *temp_exp, *tgt_p, *temp, *exp_p;
1914 struct type6_hdr *tp6Hdr_p;
1915 struct CPRBX *cprbx_p;
1916 struct cca_public_key *key_p;
1917 struct T6_keyBlock_hdrX *keyb_p;
1918
1919 temp_exp = kmalloc(256, GFP_KERNEL);
1920 if (!temp_exp)
1921 return EGETBUFF;
1922 mod_len = icaMsg_p->inputdatalength;
1923 if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
1924 kfree(temp_exp);
1925 return SEN_RELEASED;
1926 }
1927 if (is_empty(temp_exp, mod_len)) {
1928 kfree(temp_exp);
1929 return SEN_USER_ERROR;
1930 }
1931 exp_p = temp_exp;
1932 for (i = 0; i < mod_len; i++)
1933 if (exp_p[i])
1934 break;
1935 if (i >= mod_len) {
1936 kfree(temp_exp);
1937 return SEN_USER_ERROR;
1938 }
1939 exp_len = mod_len - i;
1940 exp_p += i;
1941 PDEBUG("exp_len after computation: %08x\n", exp_len);
1942 tmp_size = FIXED_TYPE6_ME_EN_LENX + 2 * mod_len + exp_len;
1943 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1944 parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
1945 tmp_size = tmp_size + CALLER_HEADER;
1946 vud_len = 2 + mod_len;
1947 memset(z90cMsg_p, 0, tmp_size);
1948 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1949 memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
1950 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1951 tp6Hdr_p->ToCardLen1 = total_CPRB_len;
1952 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
1953 memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
1954 sizeof(static_PKE_function_code));
1955 tgt_p += sizeof(struct type6_hdr);
1956 memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
1957 cprbx_p = (struct CPRBX *) tgt_p;
1958 cprbx_p->domain = (unsigned short)cdx;
1959 cprbx_p->rpl_msgbl = RESPONSE_CPRBX_SIZE;
1960 tgt_p += sizeof(struct CPRBX);
1961 if (dev_type == PCIXCC_MCL2)
1962 memcpy(tgt_p, &static_pke_function_and_rulesX_MCL2,
1963 sizeof(struct function_and_rules_block));
1964 else
1965 memcpy(tgt_p, &static_pke_function_and_rulesX,
1966 sizeof(struct function_and_rules_block));
1967 tgt_p += sizeof(struct function_and_rules_block);
1968
1969 tgt_p += 2;
1970 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) {
1971 kfree(temp_exp);
1972 return SEN_RELEASED;
1973 }
1974 if (is_empty(tgt_p, mod_len)) {
1975 kfree(temp_exp);
1976 return SEN_USER_ERROR;
1977 }
1978 tgt_p -= 2;
1979 *((short *)tgt_p) = (short) vud_len;
1980 tgt_p += vud_len;
1981 keyb_p = (struct T6_keyBlock_hdrX *)tgt_p;
1982 tgt_p += sizeof(struct T6_keyBlock_hdrX);
1983 memcpy(tgt_p, &static_public_key, sizeof(static_public_key));
1984 key_p = (struct cca_public_key *)tgt_p;
1985 temp = key_p->pubSec.exponent;
1986 memcpy(temp, exp_p, exp_len);
1987 kfree(temp_exp);
1988 temp += exp_len;
1989 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1990 return SEN_RELEASED;
1991 if (is_empty(temp, mod_len))
1992 return SEN_USER_ERROR;
1993 key_p->pubSec.modulus_bit_len = 8 * mod_len;
1994 key_p->pubSec.modulus_byte_len = mod_len;
1995 key_p->pubSec.exponent_len = exp_len;
1996 key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
1997 key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
1998 key_p->pubHdr.token_length = key_len;
1999 key_len += 4;
2000 keyb_p->ulen = (unsigned short)key_len;
2001 key_len += 2;
2002 keyb_p->blen = (unsigned short)key_len;
2003 cprbx_p->req_parml = parmBlock_l;
2004 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2005
2006 return 0;
2007}
2008
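The builder above copies the caller's b_key into a scratch buffer and then skips leading zero bytes to find the significant exponent length (exp_len) and its start (exp_p). A small, self-contained user-space sketch of just that step; the buffer contents are made up for the illustration and this is not driver code:

#include <stdio.h>

/* Illustration only: strip leading zero bytes from a right-aligned
 * public exponent, the way exp_len/exp_p are derived above. */
int main(void)
{
	unsigned char b_key[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01 };
	int mod_len = sizeof(b_key);	/* exponent field is mod_len bytes wide */
	int i;

	for (i = 0; i < mod_len; i++)
		if (b_key[i])
			break;
	if (i >= mod_len) {
		fprintf(stderr, "exponent is all zero (user error)\n");
		return 1;
	}
	printf("exp_len = %d, first byte = 0x%02x\n", mod_len - i, b_key[i]);
	return 0;	/* prints exp_len = 3 for the 0x010001 exponent */
}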
2009static int
2010ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
2011 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
2012 int dev_type)
2013{
2014 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
2015 int long_len, pad_len, keyPartsLen, tmp_l;
2016 unsigned char *tgt_p, *temp;
2017 struct type6_hdr *tp6Hdr_p;
2018 struct CPRBX *cprbx_p;
2019 struct cca_token_hdr *keyHdr_p;
2020 struct cca_pvt_ext_CRT_sec *pvtSec_p;
2021 struct cca_public_sec *pubSec_p;
2022
2023 mod_len = icaMsg_p->inputdatalength;
2024 short_len = mod_len / 2;
2025 long_len = 8 + short_len;
2026 keyPartsLen = 3 * long_len + 2 * short_len;
2027 pad_len = (8 - (keyPartsLen % 8)) % 8;
2028 keyPartsLen += pad_len + mod_len;
2029 tmp_size = FIXED_TYPE6_CR_LENX + keyPartsLen + mod_len;
2030 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
2031 parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
2032 vud_len = 2 + mod_len;
2033 tmp_size = tmp_size + CALLER_HEADER;
2034 memset(z90cMsg_p, 0, tmp_size);
2035 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
2036 memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
2037 tp6Hdr_p = (struct type6_hdr *)tgt_p;
2038 tp6Hdr_p->ToCardLen1 = total_CPRB_len;
2039 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
2040 tgt_p += sizeof(struct type6_hdr);
2041 cprbx_p = (struct CPRBX *) tgt_p;
2042 memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
2043 cprbx_p->domain = (unsigned short)cdx;
2044 cprbx_p->req_parml = parmBlock_l;
2045 cprbx_p->rpl_msgbl = parmBlock_l;
2046 tgt_p += sizeof(struct CPRBX);
2047 if (dev_type == PCIXCC_MCL2)
2048 memcpy(tgt_p, &static_pkd_function_and_rulesX_MCL2,
2049 sizeof(struct function_and_rules_block));
2050 else
2051 memcpy(tgt_p, &static_pkd_function_and_rulesX,
2052 sizeof(struct function_and_rules_block));
2053 tgt_p += sizeof(struct function_and_rules_block);
2054 *((short *)tgt_p) = (short) vud_len;
2055 tgt_p += 2;
2056 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
2057 return SEN_RELEASED;
2058 if (is_empty(tgt_p, mod_len))
2059 return SEN_USER_ERROR;
2060 tgt_p += mod_len;
2061 tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
2062 sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
2063 *((short *)tgt_p) = (short) tmp_l;
2064 temp = tgt_p + 2;
2065 tmp_l -= 2;
2066 *((short *)temp) = (short) tmp_l;
2067 tgt_p += sizeof(struct T6_keyBlock_hdr);
2068 keyHdr_p = (struct cca_token_hdr *)tgt_p;
2069 keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
2070 tmp_l -= 4;
2071 keyHdr_p->token_length = tmp_l;
2072 tgt_p += sizeof(struct cca_token_hdr);
2073 pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
2074 pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
2075 pvtSec_p->section_length =
2076 sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
2077 pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
2078 pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
2079 pvtSec_p->p_len = long_len;
2080 pvtSec_p->q_len = short_len;
2081 pvtSec_p->dp_len = long_len;
2082 pvtSec_p->dq_len = short_len;
2083 pvtSec_p->u_len = long_len;
2084 pvtSec_p->mod_len = mod_len;
2085 pvtSec_p->pad_len = pad_len;
2086 tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
2087 if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
2088 return SEN_RELEASED;
2089 if (is_empty(tgt_p, long_len))
2090 return SEN_USER_ERROR;
2091 tgt_p += long_len;
2092 if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
2093 return SEN_RELEASED;
2094 if (is_empty(tgt_p, short_len))
2095 return SEN_USER_ERROR;
2096 tgt_p += short_len;
2097 if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
2098 return SEN_RELEASED;
2099 if (is_empty(tgt_p, long_len))
2100 return SEN_USER_ERROR;
2101 tgt_p += long_len;
2102 if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
2103 return SEN_RELEASED;
2104 if (is_empty(tgt_p, short_len))
2105 return SEN_USER_ERROR;
2106 tgt_p += short_len;
2107 if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
2108 return SEN_RELEASED;
2109 if (is_empty(tgt_p, long_len))
2110 return SEN_USER_ERROR;
2111 tgt_p += long_len;
2112 tgt_p += pad_len;
2113 memset(tgt_p, 0xFF, mod_len);
2114 tgt_p += mod_len;
2115 memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
2116 pubSec_p = (struct cca_public_sec *) tgt_p;
2117 pubSec_p->modulus_bit_len = 8 * mod_len;
2118 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2119
2120 return 0;
2121}
2122
2123static int
2124ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
2125 union type50_msg *z90cMsg_p)
2126{
2127 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
2128 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
2129 union type50_msg *tmp_type50_msg;
2130
2131 mod_len = icaMex_p->inputdatalength;
2132
2133 msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) +
2134 CALLER_HEADER;
2135
2136 memset(z90cMsg_p, 0, msg_size);
2137
2138 tmp_type50_msg = (union type50_msg *)
2139 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2140
2141 tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE;
2142
2143 if (mod_len <= 128) {
2144 tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN;
2145 tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT;
2146 mod_tgt = tmp_type50_msg->meb1.modulus;
2147 mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus);
2148 exp_tgt = tmp_type50_msg->meb1.exponent;
2149 exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent);
2150 inp_tgt = tmp_type50_msg->meb1.message;
2151 inp_tgt_len = sizeof(tmp_type50_msg->meb1.message);
2152 } else {
2153 tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN;
2154 tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT;
2155 mod_tgt = tmp_type50_msg->meb2.modulus;
2156 mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus);
2157 exp_tgt = tmp_type50_msg->meb2.exponent;
2158 exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent);
2159 inp_tgt = tmp_type50_msg->meb2.message;
2160 inp_tgt_len = sizeof(tmp_type50_msg->meb2.message);
2161 }
2162
2163 mod_tgt += (mod_tgt_len - mod_len);
2164 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
2165 return SEN_RELEASED;
2166 if (is_empty(mod_tgt, mod_len))
2167 return SEN_USER_ERROR;
2168 exp_tgt += (exp_tgt_len - mod_len);
2169 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
2170 return SEN_RELEASED;
2171 if (is_empty(exp_tgt, mod_len))
2172 return SEN_USER_ERROR;
2173 inp_tgt += (inp_tgt_len - mod_len);
2174 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
2175 return SEN_RELEASED;
2176 if (is_empty(inp_tgt, mod_len))
2177 return SEN_USER_ERROR;
2178
2179 *z90cMsg_l_p = msg_size - CALLER_HEADER;
2180
2181 return 0;
2182}
2183
2184static int
2185ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
2186 int *z90cMsg_l_p, union type50_msg *z90cMsg_p)
2187{
2188 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
2189 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset;
2190 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt,
2191 temp[8];
2192 union type50_msg *tmp_type50_msg;
2193
2194 mod_len = icaMsg_p->inputdatalength;
2195 short_len = mod_len / 2;
2196 long_len = mod_len / 2 + 8;
2197 long_offset = 0;
2198
2199 if (long_len > 128) {
2200 memset(temp, 0x00, sizeof(temp));
2201 if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128))
2202 return SEN_RELEASED;
2203 if (!is_empty(temp, 8))
2204 return SEN_NOT_AVAIL;
2205 if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128))
2206 return SEN_RELEASED;
2207 if (!is_empty(temp, 8))
2208 return SEN_NOT_AVAIL;
2209 if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128))
2210 return SEN_RELEASED;
2211 if (!is_empty(temp, 8))
2212 return SEN_NOT_AVAIL;
2213 long_offset = long_len - 128;
2214 long_len = 128;
2215 }
2216
2217 tmp_size = ((long_len <= 64) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) +
2218 CALLER_HEADER;
2219
2220 memset(z90cMsg_p, 0, tmp_size);
2221
2222 tmp_type50_msg = (union type50_msg *)
2223 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2224
2225 tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE;
2226 if (long_len <= 64) {
2227 tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN;
2228 tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT;
2229 p_tgt = tmp_type50_msg->crb1.p;
2230 p_tgt_len = sizeof(tmp_type50_msg->crb1.p);
2231 q_tgt = tmp_type50_msg->crb1.q;
2232 q_tgt_len = sizeof(tmp_type50_msg->crb1.q);
2233 dp_tgt = tmp_type50_msg->crb1.dp;
2234 dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp);
2235 dq_tgt = tmp_type50_msg->crb1.dq;
2236 dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq);
2237 u_tgt = tmp_type50_msg->crb1.u;
2238 u_tgt_len = sizeof(tmp_type50_msg->crb1.u);
2239 inp_tgt = tmp_type50_msg->crb1.message;
2240 inp_tgt_len = sizeof(tmp_type50_msg->crb1.message);
2241 } else {
2242 tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN;
2243 tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT;
2244 p_tgt = tmp_type50_msg->crb2.p;
2245 p_tgt_len = sizeof(tmp_type50_msg->crb2.p);
2246 q_tgt = tmp_type50_msg->crb2.q;
2247 q_tgt_len = sizeof(tmp_type50_msg->crb2.q);
2248 dp_tgt = tmp_type50_msg->crb2.dp;
2249 dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp);
2250 dq_tgt = tmp_type50_msg->crb2.dq;
2251 dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq);
2252 u_tgt = tmp_type50_msg->crb2.u;
2253 u_tgt_len = sizeof(tmp_type50_msg->crb2.u);
2254 inp_tgt = tmp_type50_msg->crb2.message;
2255 inp_tgt_len = sizeof(tmp_type50_msg->crb2.message);
2256 }
2257
2258 p_tgt += (p_tgt_len - long_len);
2259 if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len))
2260 return SEN_RELEASED;
2261 if (is_empty(p_tgt, long_len))
2262 return SEN_USER_ERROR;
2263 q_tgt += (q_tgt_len - short_len);
2264 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
2265 return SEN_RELEASED;
2266 if (is_empty(q_tgt, short_len))
2267 return SEN_USER_ERROR;
2268 dp_tgt += (dp_tgt_len - long_len);
2269 if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len))
2270 return SEN_RELEASED;
2271 if (is_empty(dp_tgt, long_len))
2272 return SEN_USER_ERROR;
2273 dq_tgt += (dq_tgt_len - short_len);
2274 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
2275 return SEN_RELEASED;
2276 if (is_empty(dq_tgt, short_len))
2277 return SEN_USER_ERROR;
2278 u_tgt += (u_tgt_len - long_len);
2279 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len))
2280 return SEN_RELEASED;
2281 if (is_empty(u_tgt, long_len))
2282 return SEN_USER_ERROR;
2283 inp_tgt += (inp_tgt_len - mod_len);
2284 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
2285 return SEN_RELEASED;
2286 if (is_empty(inp_tgt, mod_len))
2287 return SEN_USER_ERROR;
2288
2289 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2290
2291 return 0;
2292}
2293
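ICACRT_msg_to_type50CRT_msg splits the CRT key into "short" fields (q, dq) of half the modulus length and "long" fields (p, dp, u) that carry 8 extra guard bytes; anything past 128 bytes is tolerated only if the extra leading bytes are zero, which is what long_offset accounts for. A standalone worked example of those length calculations, with modulus sizes chosen purely for illustration:

#include <stdio.h>

/* Illustration only: the short/long CRT field sizes used by the
 * type50 builder above, for a few example modulus lengths (bytes). */
int main(void)
{
	int sizes[] = { 128, 192, 256 };	/* 1024, 1536, 2048 bit */
	int i;

	for (i = 0; i < 3; i++) {
		int mod_len = sizes[i];
		int short_len = mod_len / 2;
		int long_len = mod_len / 2 + 8;
		int long_offset = (long_len > 128) ? long_len - 128 : 0;

		printf("mod_len %3d: short %3d, long %3d, offset %d\n",
		       mod_len, short_len, long_len, long_offset);
	}
	return 0;
}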
2294int
2295convert_request(unsigned char *buffer, int func, unsigned short function,
2296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
2297{
2298 if (dev_type == PCICA) {
2299 if (func == ICARSACRT)
2300 return ICACRT_msg_to_type4CRT_msg(
2301 (struct ica_rsa_modexpo_crt *) buffer,
2302 msg_l_p, (union type4_msg *) msg_p);
2303 else
2304 return ICAMEX_msg_to_type4MEX_msg(
2305 (struct ica_rsa_modexpo *) buffer,
2306 msg_l_p, (union type4_msg *) msg_p);
2307 }
2308 if (dev_type == PCICC) {
2309 if (func == ICARSACRT)
2310 return ICACRT_msg_to_type6CRT_msg(
2311 (struct ica_rsa_modexpo_crt *) buffer,
2312 cdx, msg_l_p, (struct type6_msg *)msg_p);
2313 if (function == PCI_FUNC_KEY_ENCRYPT)
2314 return ICAMEX_msg_to_type6MEX_en_msg(
2315 (struct ica_rsa_modexpo *) buffer,
2316 cdx, msg_l_p, (struct type6_msg *) msg_p);
2317 else
2318 return ICAMEX_msg_to_type6MEX_de_msg(
2319 (struct ica_rsa_modexpo *) buffer,
2320 cdx, msg_l_p, (struct type6_msg *) msg_p);
2321 }
2322 if ((dev_type == PCIXCC_MCL2) ||
2323 (dev_type == PCIXCC_MCL3) ||
2324 (dev_type == CEX2C)) {
2325 if (func == ICARSACRT)
2326 return ICACRT_msg_to_type6CRT_msgX(
2327 (struct ica_rsa_modexpo_crt *) buffer,
2328 cdx, msg_l_p, (struct type6_msg *) msg_p,
2329 dev_type);
2330 else
2331 return ICAMEX_msg_to_type6MEX_msgX(
2332 (struct ica_rsa_modexpo *) buffer,
2333 cdx, msg_l_p, (struct type6_msg *) msg_p,
2334 dev_type);
2335 }
2336 if (dev_type == CEX2A) {
2337 if (func == ICARSACRT)
2338 return ICACRT_msg_to_type50CRT_msg(
2339 (struct ica_rsa_modexpo_crt *) buffer,
2340 msg_l_p, (union type50_msg *) msg_p);
2341 else
2342 return ICAMEX_msg_to_type50MEX_msg(
2343 (struct ica_rsa_modexpo *) buffer,
2344 msg_l_p, (union type50_msg *) msg_p);
2345 }
2346
2347 return 0;
2348}
2349
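convert_request is the single dispatch point: it picks one of the builders above based on the selected device type (type4 for PCICA, type6 for PCICC, the extended "X" type6 builders for PCIXCC/CEX2C, type50 for CEX2A). A tiny standalone summary of that mapping; the table only restates the switch above and is not driver code:

#include <stdio.h>

int main(void)
{
	/* Device type -> request message format, per convert_request() above. */
	static const char *map[][2] = {
		{ "PCICA",       "type4  (ICA*_msg_to_type4*_msg)"  },
		{ "PCICC",       "type6  (ICA*_msg_to_type6*_msg)"  },
		{ "PCIXCC_MCL2", "type6X (ICA*_msg_to_type6*_msgX)" },
		{ "PCIXCC_MCL3", "type6X (ICA*_msg_to_type6*_msgX)" },
		{ "CEX2C",       "type6X (ICA*_msg_to_type6*_msgX)" },
		{ "CEX2A",       "type50 (ICA*_msg_to_type50*_msg)" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		printf("%-12s -> %s\n", map[i][0], map[i][1]);
	return 0;
}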
2350int ext_bitlens_msg_count = 0;
2351static inline void
2352unset_ext_bitlens(void)
2353{
2354 if (!ext_bitlens_msg_count) {
2355 PRINTK("Unable to use coprocessors for extended bitlengths. "
2356 "Using PCICAs/CEX2As (if present) for extended "
2357 "bitlengths. This is not an error.\n");
2358 ext_bitlens_msg_count++;
2359 }
2360 ext_bitlens = 0;
2361}
2362
2363int
2364convert_response(unsigned char *response, unsigned char *buffer,
2365 int *respbufflen_p, unsigned char *resp_buff)
2366{
2367 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
2368 struct error_hdr *errh_p = (struct error_hdr *) response;
2369 struct type80_hdr *t80h_p = (struct type80_hdr *) response;
2370 struct type84_hdr *t84h_p = (struct type84_hdr *) response;
2371 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
2372 int reply_code, service_rc, service_rs, src_l;
2373 unsigned char *src_p, *tgt_p;
2374 struct CPRB *cprb_p;
2375 struct CPRBX *cprbx_p;
2376
2377 src_p = 0;
2378 reply_code = 0;
2379 service_rc = 0;
2380 service_rs = 0;
2381 src_l = 0;
2382 switch (errh_p->type) {
2383 case TYPE82_RSP_CODE:
2384 case TYPE88_RSP_CODE:
2385 reply_code = errh_p->reply_code;
2386 src_p = (unsigned char *)errh_p;
2387 PRINTK("Hardware error: Type %02X Message Header: "
2388 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2389 errh_p->type,
2390 src_p[0], src_p[1], src_p[2], src_p[3],
2391 src_p[4], src_p[5], src_p[6], src_p[7]);
2392 break;
2393 case TYPE80_RSP_CODE:
2394 src_l = icaMsg_p->outputdatalength;
2395 src_p = response + (int)t80h_p->len - src_l;
2396 break;
2397 case TYPE84_RSP_CODE:
2398 src_l = icaMsg_p->outputdatalength;
2399 src_p = response + (int)t84h_p->len - src_l;
2400 break;
2401 case TYPE86_RSP_CODE:
2402 reply_code = t86m_p->header.reply_code;
2403 if (reply_code != 0)
2404 break;
2405 cprb_p = (struct CPRB *)
2406 (response + sizeof(struct type86_fmt2_msg));
2407 cprbx_p = (struct CPRBX *) cprb_p;
2408 if (cprb_p->cprb_ver_id != 0x02) {
2409 le2toI(cprb_p->ccp_rtcode, &service_rc);
2410 if (service_rc != 0) {
2411 le2toI(cprb_p->ccp_rscode, &service_rs);
2412 if ((service_rc == 8) && (service_rs == 66))
2413 PDEBUG("Bad block format on PCICC\n");
2414 else if ((service_rc == 8) && (service_rs == 65))
2415 PDEBUG("Probably an even modulus on "
2416 "PCICC\n");
2417 else if ((service_rc == 8) && (service_rs == 770)) {
2418 PDEBUG("Invalid key length on PCICC\n");
2419 unset_ext_bitlens();
2420 return REC_USE_PCICA;
2421 }
2422 else if ((service_rc == 8) && (service_rs == 783)) {
2423 PDEBUG("Extended bitlengths not enabled "
2424 "on PCICC\n");
2425 unset_ext_bitlens();
2426 return REC_USE_PCICA;
2427 }
2428 else
2429 PRINTK("service rc/rs (PCICC): %d/%d\n",
2430 service_rc, service_rs);
2431 return REC_OPERAND_INV;
2432 }
2433 src_p = (unsigned char *)cprb_p + sizeof(struct CPRB);
2434 src_p += 4;
2435 le2toI(src_p, &src_l);
2436 src_l -= 2;
2437 src_p += 2;
2438 } else {
2439 service_rc = (int)cprbx_p->ccp_rtcode;
2440 if (service_rc != 0) {
2441 service_rs = (int) cprbx_p->ccp_rscode;
2442 if ((service_rc == 8) && (service_rs == 66))
2443 PDEBUG("Bad block format on PCIXCC\n");
2444 else if ((service_rc == 8) && (service_rs == 65))
2445 PDEBUG("Probably an even modulus on "
2446 "PCIXCC\n");
2447 else if ((service_rc == 8) && (service_rs == 770)) {
2448 PDEBUG("Invalid key length on PCIXCC\n");
2449 unset_ext_bitlens();
2450 return REC_USE_PCICA;
2451 }
2452 else if ((service_rc == 8) && (service_rs == 783)) {
2453 PDEBUG("Extended bitlengths not enabled "
2454 "on PCIXCC\n");
2455 unset_ext_bitlens();
2456 return REC_USE_PCICA;
2457 }
2458 else
2459 PRINTK("service rc/rs (PCIXCC): %d/%d\n",
2460 service_rc, service_rs);
2461 return REC_OPERAND_INV;
2462 }
2463 src_p = (unsigned char *)
2464 cprbx_p + sizeof(struct CPRBX);
2465 src_p += 4;
2466 src_l = (int)(*((short *) src_p));
2467 src_l -= 2;
2468 src_p += 2;
2469 }
2470 break;
2471 default:
2472 src_p = (unsigned char *)errh_p;
2473 PRINTK("Unrecognized Message Header: "
2474 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2475 src_p[0], src_p[1], src_p[2], src_p[3],
2476 src_p[4], src_p[5], src_p[6], src_p[7]);
2477 return REC_BAD_MESSAGE;
2478 }
2479
2480 if (reply_code)
2481 switch (reply_code) {
2482 case REP82_ERROR_MACHINE_FAILURE:
2483 if (errh_p->type == TYPE82_RSP_CODE)
2484 PRINTKW("Machine check failure\n");
2485 else
2486 PRINTKW("Module failure\n");
2487 return REC_HARDWAR_ERR;
2488 case REP82_ERROR_OPERAND_INVALID:
2489 return REC_OPERAND_INV;
2490 case REP88_ERROR_MESSAGE_MALFORMD:
2491 PRINTKW("Message malformed\n");
2492 return REC_OPERAND_INV;
2493 case REP82_ERROR_OPERAND_SIZE:
2494 return REC_OPERAND_SIZE;
2495 case REP82_ERROR_EVEN_MOD_IN_OPND:
2496 return REC_EVEN_MOD;
2497 case REP82_ERROR_MESSAGE_TYPE:
2498 return WRONG_DEVICE_TYPE;
2499 case REP82_ERROR_TRANSPORT_FAIL:
2500 PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n",
2501 t86m_p->apfs[0], t86m_p->apfs[1],
2502 t86m_p->apfs[2], t86m_p->apfs[3]);
2503 return REC_HARDWAR_ERR;
2504 default:
2505 PRINTKW("reply code = %d\n", reply_code);
2506 return REC_HARDWAR_ERR;
2507 }
2508
2509 if (service_rc != 0)
2510 return REC_OPERAND_INV;
2511
2512 if ((src_l > icaMsg_p->outputdatalength) ||
2513 (src_l > RESPBUFFSIZE) ||
2514 (src_l <= 0))
2515 return REC_OPERAND_SIZE;
2516
2517 PDEBUG("Length returned = %d\n", src_l);
2518 tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l;
2519 memcpy(tgt_p, src_p, src_l);
2520 if ((errh_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) {
2521 memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l);
2522 if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l))
2523 return REC_INVALID_PAD;
2524 }
2525 *respbufflen_p = icaMsg_p->outputdatalength;
2526 if (*respbufflen_p == 0)
2527 PRINTK("Zero *respbufflen_p\n");
2528
2529 return 0;
2530}
2531
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
deleted file mode 100644
index b2f20ab8431a..000000000000
--- a/drivers/s390/crypto/z90main.c
+++ /dev/null
@@ -1,3379 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90main.c
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h> // copy_(from|to)_user
28#include <linux/compat.h>
29#include <linux/compiler.h>
30#include <linux/delay.h> // mdelay
31#include <linux/init.h>
32#include <linux/interrupt.h> // for tasklets
33#include <linux/miscdevice.h>
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/proc_fs.h>
37#include <linux/syscalls.h>
38#include "z90crypt.h"
39#include "z90common.h"
40
41/**
42 * Defaults that may be modified.
43 */
44
45/**
46 * You can specify a different minor at compile time.
47 */
48#ifndef Z90CRYPT_MINOR
49#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
50#endif
51
52/**
53 * You can specify a different domain at compile time or on the insmod
54 * command line.
55 */
56#ifndef DOMAIN_INDEX
57#define DOMAIN_INDEX -1
58#endif
59
60/**
61 * This is the name under which the device is registered in /proc/modules.
62 */
63#define REG_NAME "z90crypt"
64
65/**
66 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
67 * older than CLEANUPTIME seconds.
68 */
69#ifndef CLEANUPTIME
70#define CLEANUPTIME 15
71#endif
72
73/**
74 * Config should run every CONFIGTIME seconds
75 */
76#ifndef CONFIGTIME
77#define CONFIGTIME 30
78#endif
79
80/**
81 * The first execution of the config task should take place
82 * immediately after initialization
83 */
84#ifndef INITIAL_CONFIGTIME
85#define INITIAL_CONFIGTIME 1
86#endif
87
88/**
89 * Reader should run every READERTIME milliseconds
90 * With the 100Hz patch for s390, z90crypt can lock the system solid while
91 * under heavy load. We'll try to avoid that.
92 */
93#ifndef READERTIME
94#if HZ > 1000
95#define READERTIME 2
96#else
97#define READERTIME 10
98#endif
99#endif
100
101/**
102 * turn long device array index into device pointer
103 */
104#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
105
106/**
107 * turn short device array index into long device array index
108 */
109#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
110
111/**
112 * turn short device array index into device pointer
113 */
114#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
115
116/**
117 * Status for a work-element
118 */
119#define STAT_DEFAULT 0x00 // request has not been processed
120
121#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
122 // else, device is determined each write
123#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
124 // before being sent to the hardware.
125#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
126// 0x20 // UNUSED state
127#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
128#define STAT_NOWORK 0x00 // bits off: no work on any queue
129#define STAT_RDWRMASK 0x30 // mask for bits 5-4
130
131/**
132 * Macros to check the status RDWRMASK
133 */
134#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
135#define SET_RDWRMASK(statbyte, newval) \
136 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
137
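The RDWRMASK macros above touch only bits 5-4 of the per-request status byte, so the "routed" (bit 7) and "failed" (bit 6) flags survive state transitions. A small self-contained demonstration; the macro definitions are copied from above and the sample value is arbitrary:

#include <stdio.h>

/* Mirrored from the definitions above, for a standalone illustration. */
#define STAT_ROUTED	0x80
#define STAT_WRITTEN	0x30
#define STAT_READPEND	0x10
#define STAT_NOWORK	0x00
#define STAT_RDWRMASK	0x30
#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
#define SET_RDWRMASK(statbyte, newval) \
	{(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}

int main(void)
{
	unsigned char status = STAT_ROUTED | STAT_WRITTEN; /* routed + queued */

	SET_RDWRMASK(status, STAT_READPEND);	/* work done, response pending */
	printf("status 0x%02x, rdwr bits 0x%02x\n", status, CHK_RDWRMASK(status));
	SET_RDWRMASK(status, STAT_NOWORK);	/* response delivered */
	printf("status 0x%02x, rdwr bits 0x%02x\n", status, CHK_RDWRMASK(status));
	return 0;	/* the STAT_ROUTED bit (0x80) survives both updates */
}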
138/**
139 * Audit Trail. Progress of a Work element
140 * audit[0]: Unless noted otherwise, these bits are all set by the process
141 */
142#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
143#define FP_BUFFREQ 0x40 // Low Level buffer requested
144#define FP_BUFFGOT 0x20 // Low Level buffer obtained
145#define FP_SENT 0x10 // Work element sent to a crypto device
146 // (may be set by process or by reader task)
147#define FP_PENDING 0x08 // Work element placed on pending queue
148 // (may be set by process or by reader task)
149#define FP_REQUEST 0x04 // Work element placed on request queue
150#define FP_ASLEEP 0x02 // Work element about to sleep
151#define FP_AWAKE 0x01 // Work element has been awakened
152
153/**
154 * audit[1]: These bits are set by the reader task and/or the cleanup task
155 */
156#define FP_NOTPENDING 0x80 // Work element removed from pending queue
157#define FP_AWAKENING 0x40 // Caller about to be awakened
158#define FP_TIMEDOUT 0x20 // Caller timed out
159#define FP_RESPSIZESET 0x10 // Response size copied to work element
160#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
161#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
162#define FP_REMREQUEST 0x02 // Work element removed from request queue
163#define FP_SIGNALED 0x01 // Work element was awakened by a signal
164
165/**
166 * audit[2]: unused
167 */
168
169/**
170 * state of the file handle in private_data.status
171 */
172#define STAT_OPEN 0
173#define STAT_CLOSED 1
174
175/**
176 * PID() expands to the process ID of the current process
177 */
178#define PID() (current->pid)
179
180/**
181 * Selected Constants. The number of APs and the number of devices
182 */
183#ifndef Z90CRYPT_NUM_APS
184#define Z90CRYPT_NUM_APS 64
185#endif
186#ifndef Z90CRYPT_NUM_DEVS
187#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
188#endif
189
190/**
191 * Buffer size for receiving responses. The maximum Response Size
192 * is actually the maximum request size, since in an error condition
193 * the request itself may be returned unchanged.
194 */
195#define MAX_RESPONSE_SIZE 0x0000077C
196
197/**
198 * A count and status-byte mask
199 */
200struct status {
201 int st_count; // # of enabled devices
202 int disabled_count; // # of disabled devices
203 int user_disabled_count; // # of devices disabled via proc fs
204 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
205};
206
207/**
208 * The array of device indexes is a mechanism for fast indexing into
209 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
210 * installed, device_index[0] is 3, device_index[1] is 9, and
211 * device_index[2] is 47.
212 */
213struct device_x {
214 int device_index[Z90CRYPT_NUM_DEVS];
215};
216
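The device_index array is the "short to long" translation behind the SHRT2LONG and SHRT2DEVPTR macros defined earlier: position i in the dense array names the i-th installed AP number in the sparse 0..63 range. A standalone sketch using the 3/9/47 example from the comment above; the arrays here are stand-ins for the driver's structures, not the real ones:

#include <stdio.h>

#define NUM_APS 64

int main(void)
{
	/* Stand-ins for z90crypt.overall_device_x.device_index[] and
	 * z90crypt.device_p[]: APs 3, 9 and 47 are "installed". */
	int device_index[NUM_APS] = { 3, 9, 47 };
	const char *device_p[NUM_APS] = { 0 };
	int i;

	device_p[3]  = "PCICA";
	device_p[9]  = "CEX2C";
	device_p[47] = "CEX2A";

	for (i = 0; i < 3; i++) {
		int ap = device_index[i];		/* SHRT2LONG(i)   */
		printf("short %d -> AP %2d -> %s\n",	/* SHRT2DEVPTR(i) */
		       i, ap, device_p[ap]);
	}
	return 0;
}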
217/**
218 * All devices are arranged in a single array: 64 APs
219 */
220struct device {
221 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
222 // PCIXCC_MCL3, CEX2C, CEX2A
223 enum devstat dev_stat; // current device status
224 int dev_self_x; // Index in array
225 int disabled; // Set when device is in error
226 int user_disabled; // Set when device is disabled by user
227 int dev_q_depth; // q depth
228 unsigned char * dev_resp_p; // Response buffer address
229 int dev_resp_l; // Response Buffer length
230 int dev_caller_count; // Number of callers
231 int dev_total_req_cnt; // # requests for device since load
232 struct list_head dev_caller_list; // List of callers
233};
234
235/**
236 * There's a struct status and a struct device_x for each device type.
237 */
238struct hdware_block {
239 struct status hdware_mask;
240 struct status type_mask[Z90CRYPT_NUM_TYPES];
241 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
242 unsigned char device_type_array[Z90CRYPT_NUM_APS];
243};
244
245/**
246 * z90crypt is the topmost data structure in the hierarchy.
247 */
248struct z90crypt {
249 int max_count; // Nr of possible crypto devices
250 struct status mask;
251 int q_depth_array[Z90CRYPT_NUM_DEVS];
252 int dev_type_array[Z90CRYPT_NUM_DEVS];
253 struct device_x overall_device_x; // array device indexes
254 struct device * device_p[Z90CRYPT_NUM_DEVS];
255 int terminating;
256 int domain_established;// TRUE: domain has been found
257 int cdx; // Crypto Domain Index
258 int len; // Length of this data structure
259 struct hdware_block *hdware_info;
260};
261
262/**
263 * An array of these structures is pointed to from dev_caller
264 * The length of the array depends on the device type. For APs,
265 * there are 8.
266 *
267 * The caller buffer is allocated to the user at OPEN. At WRITE,
268 * it contains the request; at READ, the response. The function
269 * send_to_crypto_device converts the request to device-dependent
270 * form and uses the caller's OPEN-allocated buffer for the response.
271 *
272 * For the contents of caller_dev_dep_req (and caller_dev_dep_req_p,
273 * which points to it), see the discussion in z90hardware.c.
274 * Search for "extended request message block".
275 */
276struct caller {
277 int caller_buf_l; // length of original request
278 unsigned char * caller_buf_p; // Original request on WRITE
279 int caller_dev_dep_req_l; // len device dependent request
280 unsigned char * caller_dev_dep_req_p; // Device dependent form
281 unsigned char caller_id[8]; // caller-supplied message id
282 struct list_head caller_liste;
283 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
284};
285
286/**
287 * Function prototypes from z90hardware.c
288 */
289enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
290 int *dev_type);
291enum devstat reset_device(int deviceNr, int cdx, int resetNr);
292enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
293enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
294 unsigned char *resp, unsigned char *psmid);
295int convert_request(unsigned char *buffer, int func, unsigned short function,
296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
297int convert_response(unsigned char *response, unsigned char *buffer,
298 int *respbufflen_p, unsigned char *resp_buff);
299
300/**
301 * Low level function prototypes
302 */
303static int create_z90crypt(int *cdx_p);
304static int refresh_z90crypt(int *cdx_p);
305static int find_crypto_devices(struct status *deviceMask);
306static int create_crypto_device(int index);
307static int destroy_crypto_device(int index);
308static void destroy_z90crypt(void);
309static int refresh_index_array(struct status *status_str,
310 struct device_x *index_array);
311static int probe_device_type(struct device *devPtr);
312static int probe_PCIXCC_type(struct device *devPtr);
313
314/**
315 * proc fs definitions
316 */
317static struct proc_dir_entry *z90crypt_entry;
318
319/**
320 * data structures
321 */
322
323/**
324 * work_element.opener points back to this structure
325 */
326struct priv_data {
327 pid_t opener_pid;
328 unsigned char status; // 0: open 1: closed
329};
330
331/**
332 * A work element is allocated for each request
333 */
334struct work_element {
335 struct priv_data *priv_data;
336 pid_t pid;
337 int devindex; // index of device processing this w_e
338 // (If request did not specify device,
339 // -1 until placed onto a queue)
340 int devtype;
341 struct list_head liste; // used for requestq and pendingq
342 char buffer[128]; // local copy of user request
343 int buff_size; // size of the buffer for the request
344 char resp_buff[RESPBUFFSIZE];
345 int resp_buff_size;
346 char __user * resp_addr; // address of response in user space
347 unsigned int funccode; // function code of request
348 wait_queue_head_t waitq;
349 unsigned long requestsent; // time at which the request was sent
350 atomic_t alarmrung; // wake-up signal
351 unsigned char caller_id[8]; // pid + counter, for this w_e
352 unsigned char status[1]; // bits to mark status of the request
353 unsigned char audit[3]; // record of work element's progress
354 unsigned char * requestptr; // address of request buffer
355 int retcode; // return code of request
356};
357
358/**
359 * High level function prototypes
360 */
361static int z90crypt_open(struct inode *, struct file *);
362static int z90crypt_release(struct inode *, struct file *);
363static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
364static ssize_t z90crypt_write(struct file *, const char __user *,
365 size_t, loff_t *);
366static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
367static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
368
369static void z90crypt_reader_task(unsigned long);
370static void z90crypt_schedule_reader_task(unsigned long);
371static void z90crypt_config_task(unsigned long);
372static void z90crypt_cleanup_task(unsigned long);
373
374static int z90crypt_status(char *, char **, off_t, int, int *, void *);
375static int z90crypt_status_write(struct file *, const char __user *,
376 unsigned long, void *);
377
378/**
379 * Storage allocated at initialization and used throughout the life of
380 * this insmod
381 */
382static int domain = DOMAIN_INDEX;
383static struct z90crypt z90crypt;
384static int quiesce_z90crypt;
385static spinlock_t queuespinlock;
386static struct list_head request_list;
387static int requestq_count;
388static struct list_head pending_list;
389static int pendingq_count;
390
391static struct tasklet_struct reader_tasklet;
392static struct timer_list reader_timer;
393static struct timer_list config_timer;
394static struct timer_list cleanup_timer;
395static atomic_t total_open;
396static atomic_t z90crypt_step;
397
398static struct file_operations z90crypt_fops = {
399 .owner = THIS_MODULE,
400 .read = z90crypt_read,
401 .write = z90crypt_write,
402 .unlocked_ioctl = z90crypt_unlocked_ioctl,
403#ifdef CONFIG_COMPAT
404 .compat_ioctl = z90crypt_compat_ioctl,
405#endif
406 .open = z90crypt_open,
407 .release = z90crypt_release
408};
409
410static struct miscdevice z90crypt_misc_device = {
411 .minor = Z90CRYPT_MINOR,
412 .name = DEV_NAME,
413 .fops = &z90crypt_fops,
414};
415
416/**
417 * Documentation values.
418 */
419MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman "
420 "and Jochen Roehrig");
421MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
422 "Copyright 2001, 2005 IBM Corporation");
423MODULE_LICENSE("GPL");
424module_param(domain, int, 0);
425MODULE_PARM_DESC(domain, "domain index for device");
426
427#ifdef CONFIG_COMPAT
428/**
429 * ioctl32 conversion routines
430 */
431struct ica_rsa_modexpo_32 { // For 32-bit callers
432 compat_uptr_t inputdata;
433 unsigned int inputdatalength;
434 compat_uptr_t outputdata;
435 unsigned int outputdatalength;
436 compat_uptr_t b_key;
437 compat_uptr_t n_modulus;
438};
439
440static long
441trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
442{
443 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
444 struct ica_rsa_modexpo_32 mex32k;
445 struct ica_rsa_modexpo __user *mex64;
446 long ret = 0;
447 unsigned int i;
448
449 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
450 return -EFAULT;
451 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
452 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
453 return -EFAULT;
454 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
455 return -EFAULT;
456 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
457 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
458 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
459 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
460 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
461 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
462 return -EFAULT;
463 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
464 if (!ret)
465 if (__get_user(i, &mex64->outputdatalength) ||
466 __put_user(i, &mex32u->outputdatalength))
467 ret = -EFAULT;
468 return ret;
469}
470
471struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
472 compat_uptr_t inputdata;
473 unsigned int inputdatalength;
474 compat_uptr_t outputdata;
475 unsigned int outputdatalength;
476 compat_uptr_t bp_key;
477 compat_uptr_t bq_key;
478 compat_uptr_t np_prime;
479 compat_uptr_t nq_prime;
480 compat_uptr_t u_mult_inv;
481};
482
483static long
484trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
485{
486 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
487 struct ica_rsa_modexpo_crt_32 crt32k;
488 struct ica_rsa_modexpo_crt __user *crt64;
489 long ret = 0;
490 unsigned int i;
491
492 if (!access_ok(VERIFY_WRITE, crt32u,
493 sizeof(struct ica_rsa_modexpo_crt_32)))
494 return -EFAULT;
495 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
496 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
497 return -EFAULT;
498 if (copy_from_user(&crt32k, crt32u,
499 sizeof(struct ica_rsa_modexpo_crt_32)))
500 return -EFAULT;
501 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
502 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
503 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
504 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
505 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
506 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
507 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
508 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
509 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
510 return -EFAULT;
511 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
512 if (!ret)
513 if (__get_user(i, &crt64->outputdatalength) ||
514 __put_user(i, &crt32u->outputdatalength))
515 ret = -EFAULT;
516 return ret;
517}
518
519static long
520z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
521{
522 switch (cmd) {
523 case ICAZ90STATUS:
524 case Z90QUIESCE:
525 case Z90STAT_TOTALCOUNT:
526 case Z90STAT_PCICACOUNT:
527 case Z90STAT_PCICCCOUNT:
528 case Z90STAT_PCIXCCCOUNT:
529 case Z90STAT_PCIXCCMCL2COUNT:
530 case Z90STAT_PCIXCCMCL3COUNT:
531 case Z90STAT_CEX2CCOUNT:
532 case Z90STAT_REQUESTQ_COUNT:
533 case Z90STAT_PENDINGQ_COUNT:
534 case Z90STAT_TOTALOPEN_COUNT:
535 case Z90STAT_DOMAIN_INDEX:
536 case Z90STAT_STATUS_MASK:
537 case Z90STAT_QDEPTH_MASK:
538 case Z90STAT_PERDEV_REQCNT:
539 return z90crypt_unlocked_ioctl(filp, cmd, arg);
540 case ICARSAMODEXPO:
541 return trans_modexpo32(filp, cmd, arg);
542 case ICARSACRT:
543 return trans_modexpo_crt32(filp, cmd, arg);
544 default:
545 return -ENOIOCTLCMD;
546 }
547}
548#endif
549
550/**
551 * The module initialization code.
552 */
553static int __init
554z90crypt_init_module(void)
555{
556 int result, nresult;
557 struct proc_dir_entry *entry;
558
559 PDEBUG("PID %d\n", PID());
560
561 if ((domain < -1) || (domain > 15)) {
562 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
563 return -EINVAL;
564 }
565
566 /* Register as misc device with given minor (or get a dynamic one). */
567 result = misc_register(&z90crypt_misc_device);
568 if (result < 0) {
569 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
570 z90crypt_misc_device.minor, result);
571 return result;
572 }
573
574 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
575
576 result = create_z90crypt(&domain);
577 if (result != 0) {
578 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
579 domain, result);
580 result = -ENOMEM;
581 goto init_module_cleanup;
582 }
583
584 if (result == 0) {
585 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
586 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
587 __DATE__, __TIME__);
588 PDEBUG("create_z90crypt (domain index %d) successful.\n",
589 domain);
590 } else
591 PRINTK("No devices at startup\n");
592
593 /* Initialize globals. */
594 spin_lock_init(&queuespinlock);
595
596 INIT_LIST_HEAD(&pending_list);
597 pendingq_count = 0;
598
599 INIT_LIST_HEAD(&request_list);
600 requestq_count = 0;
601
602 quiesce_z90crypt = 0;
603
604 atomic_set(&total_open, 0);
605 atomic_set(&z90crypt_step, 0);
606
607 /* Set up the cleanup task. */
608 init_timer(&cleanup_timer);
609 cleanup_timer.function = z90crypt_cleanup_task;
610 cleanup_timer.data = 0;
611 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
612 add_timer(&cleanup_timer);
613
614 /* Set up the proc file system */
615 entry = create_proc_entry("driver/z90crypt", 0644, 0);
616 if (entry) {
617 entry->nlink = 1;
618 entry->data = 0;
619 entry->read_proc = z90crypt_status;
620 entry->write_proc = z90crypt_status_write;
621 }
622 else
623 PRINTK("Couldn't create z90crypt proc entry\n");
624 z90crypt_entry = entry;
625
626 /* Set up the configuration task. */
627 init_timer(&config_timer);
628 config_timer.function = z90crypt_config_task;
629 config_timer.data = 0;
630 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
631 add_timer(&config_timer);
632
633 /* Set up the reader task */
634 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
635 init_timer(&reader_timer);
636 reader_timer.function = z90crypt_schedule_reader_task;
637 reader_timer.data = 0;
638 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
639 add_timer(&reader_timer);
640
641 return 0; // success
642
643init_module_cleanup:
644 if ((nresult = misc_deregister(&z90crypt_misc_device)))
645 PRINTK("misc_deregister failed with %d.\n", nresult);
646 else
647 PDEBUG("misc_deregister successful.\n");
648
649 return result; // failure
650}
651
652/**
653 * The module termination code
654 */
655static void __exit
656z90crypt_cleanup_module(void)
657{
658 int nresult;
659
660 PDEBUG("PID %d\n", PID());
661
662 remove_proc_entry("driver/z90crypt", 0);
663
664 if ((nresult = misc_deregister(&z90crypt_misc_device)))
665 PRINTK("misc_deregister failed with %d.\n", nresult);
666 else
667 PDEBUG("misc_deregister successful.\n");
668
669 /* Remove the tasks */
670 tasklet_kill(&reader_tasklet);
671 del_timer(&reader_timer);
672 del_timer(&config_timer);
673 del_timer(&cleanup_timer);
674
675 destroy_z90crypt();
676
677 PRINTKN("Unloaded.\n");
678}
679
680/**
681 * Functions running under a process id
682 *
683 * The I/O functions:
684 * z90crypt_open
685 * z90crypt_release
686 * z90crypt_read
687 * z90crypt_write
688 * z90crypt_unlocked_ioctl
689 * z90crypt_status
690 * z90crypt_status_write
691 * disable_card
692 * enable_card
693 *
694 * Helper functions:
695 * z90crypt_rsa
696 * z90crypt_prepare
697 * z90crypt_send
698 * z90crypt_process_results
699 *
700 */
701static int
702z90crypt_open(struct inode *inode, struct file *filp)
703{
704 struct priv_data *private_data_p;
705
706 if (quiesce_z90crypt)
707 return -EQUIESCE;
708
709 private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL);
710 if (!private_data_p) {
711 PRINTK("Memory allocate failed\n");
712 return -ENOMEM;
713 }
714
715 private_data_p->status = STAT_OPEN;
716 private_data_p->opener_pid = PID();
717 filp->private_data = private_data_p;
718 atomic_inc(&total_open);
719
720 return 0;
721}
722
723static int
724z90crypt_release(struct inode *inode, struct file *filp)
725{
726 struct priv_data *private_data_p = filp->private_data;
727
728 PDEBUG("PID %d (filp %p)\n", PID(), filp);
729
730 private_data_p->status = STAT_CLOSED;
731 memset(private_data_p, 0, sizeof(struct priv_data));
732 kfree(private_data_p);
733 atomic_dec(&total_open);
734
735 return 0;
736}
737
738/*
739 * There are two read functions; a compile-time option selects which one
740 * is built: without USE_GET_RANDOM_BYTES
741 * => read() always returns -EPERM;
742 * otherwise
743 * => read() uses the get_random_bytes() kernel function
744 */
745#ifndef USE_GET_RANDOM_BYTES
746/**
747 * z90crypt_read will not be supported beyond z90crypt 1.3.1
748 */
749static ssize_t
750z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
751{
752 PDEBUG("filp %p (PID %d)\n", filp, PID());
753 return -EPERM;
754}
755#else // we want to use get_random_bytes
756/**
757 * read() just returns a string of random bytes. Since we have no way
758 * to generate these cryptographically, we just execute get_random_bytes
759 * for the length specified.
760 */
761#include <linux/random.h>
762static ssize_t
763z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
764{
765 unsigned char *temp_buff;
766
767 PDEBUG("filp %p (PID %d)\n", filp, PID());
768
769 if (quiesce_z90crypt)
770 return -EQUIESCE;
771 if ((ssize_t) count < 0) {
772 PRINTK("Requested random byte count negative: %ld\n", (long) count);
773 return -EINVAL;
774 }
775 if (count > RESPBUFFSIZE) {
776 PDEBUG("count[%d] > RESPBUFFSIZE", count);
777 return -EINVAL;
778 }
779 if (count == 0)
780 return 0;
781 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
782 if (!temp_buff) {
783 PRINTK("Memory allocate failed\n");
784 return -ENOMEM;
785 }
786 get_random_bytes(temp_buff, count);
787
788 if (copy_to_user(buf, temp_buff, count) != 0) {
789 kfree(temp_buff);
790 return -EFAULT;
791 }
792 kfree(temp_buff);
793 return count;
794}
795#endif
796
797/**
798 * Write is not allowed
799 */
800static ssize_t
801z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
802{
803 PDEBUG("filp %p (PID %d)\n", filp, PID());
804 return -EPERM;
805}
806
807/**
808 * New status functions
809 */
810static inline int
811get_status_totalcount(void)
812{
813 return z90crypt.hdware_info->hdware_mask.st_count;
814}
815
816static inline int
817get_status_PCICAcount(void)
818{
819 return z90crypt.hdware_info->type_mask[PCICA].st_count;
820}
821
822static inline int
823get_status_PCICCcount(void)
824{
825 return z90crypt.hdware_info->type_mask[PCICC].st_count;
826}
827
828static inline int
829get_status_PCIXCCcount(void)
830{
831 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
832 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
833}
834
835static inline int
836get_status_PCIXCCMCL2count(void)
837{
838 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
839}
840
841static inline int
842get_status_PCIXCCMCL3count(void)
843{
844 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
845}
846
847static inline int
848get_status_CEX2Ccount(void)
849{
850 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
851}
852
853static inline int
854get_status_CEX2Acount(void)
855{
856 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
857}
858
859static inline int
860get_status_requestq_count(void)
861{
862 return requestq_count;
863}
864
865static inline int
866get_status_pendingq_count(void)
867{
868 return pendingq_count;
869}
870
871static inline int
872get_status_totalopen_count(void)
873{
874 return atomic_read(&total_open);
875}
876
877static inline int
878get_status_domain_index(void)
879{
880 return z90crypt.cdx;
881}
882
883static inline unsigned char *
884get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
885{
886 int i, ix;
887
888 memcpy(status, z90crypt.hdware_info->device_type_array,
889 Z90CRYPT_NUM_APS);
890
891 for (i = 0; i < get_status_totalcount(); i++) {
892 ix = SHRT2LONG(i);
893 if (LONG2DEVPTR(ix)->user_disabled)
894 status[ix] = 0x0d;
895 }
896
897 return status;
898}
899
900static inline unsigned char *
901get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
902{
903 int i, ix;
904
905 memset(qdepth, 0, Z90CRYPT_NUM_APS);
906
907 for (i = 0; i < get_status_totalcount(); i++) {
908 ix = SHRT2LONG(i);
909 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
910 }
911
912 return qdepth;
913}
914
915static inline unsigned int *
916get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
917{
918 int i, ix;
919
920 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
921
922 for (i = 0; i < get_status_totalcount(); i++) {
923 ix = SHRT2LONG(i);
924 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
925 }
926
927 return reqcnt;
928}
929
930static inline void
931init_work_element(struct work_element *we_p,
932 struct priv_data *priv_data, pid_t pid)
933{
934 int step;
935
936 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
937 /* Come up with a unique id for this caller. */
938 step = atomic_inc_return(&z90crypt_step);
939 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
940 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
941 we_p->pid = pid;
942 we_p->priv_data = priv_data;
943 we_p->status[0] = STAT_DEFAULT;
944 we_p->audit[0] = 0x00;
945 we_p->audit[1] = 0x00;
946 we_p->audit[2] = 0x00;
947 we_p->resp_buff_size = 0;
948 we_p->retcode = 0;
949 we_p->devindex = -1;
950 we_p->devtype = -1;
951 atomic_set(&we_p->alarmrung, 0);
952 init_waitqueue_head(&we_p->waitq);
953 INIT_LIST_HEAD(&(we_p->liste));
954}
955
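init_work_element builds the 8-byte caller_id by packing the opener's pid next to a global step counter, which appears to be how a response (whose psmid, cf. receive_from_AP above, echoes the caller_id) is matched back to the right work element. A standalone sketch of that packing; the types are simplified to plain ints, whereas the real code uses pid_t and an atomic counter:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char caller_id[8];
	int pid = 1234;		/* stand-in for PID()                         */
	int step = 42;		/* stand-in for atomic_inc_return(&z90crypt_step) */
	int i;

	memcpy(caller_id + 0, &pid, sizeof(pid));
	memcpy(caller_id + 4, &step, sizeof(step));

	for (i = 0; i < 8; i++)
		printf("%02x", caller_id[i]);
	printf("\n");		/* 8-byte id: pid in bytes 0-3, step in bytes 4-7 */
	return 0;
}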
956static inline int
957allocate_work_element(struct work_element **we_pp,
958 struct priv_data *priv_data_p, pid_t pid)
959{
960 struct work_element *we_p;
961
962 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
963 if (!we_p)
964 return -ENOMEM;
965 init_work_element(we_p, priv_data_p, pid);
966 *we_pp = we_p;
967 return 0;
968}
969
970static inline void
971remove_device(struct device *device_p)
972{
973 if (!device_p || (device_p->disabled != 0))
974 return;
975 device_p->disabled = 1;
976 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
977 z90crypt.hdware_info->hdware_mask.disabled_count++;
978}
979
980/**
981 * Bitlength limits for each card
982 *
983 * There are new MCLs which allow more bitlengths. See the table for details.
984 * The MCL must be applied and the newer bitlengths enabled for these to work.
985 *
986 * Card Type Old limit New limit
987 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
988 * PCICC 512-1024 512-2048
989 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
990 * PCIXCC_MCL3 ----- 128-2048
991 * CEX2C 512-2048 128-2048
992 * CEX2A ??-2048 same (the lower limit is less than 128 bit...)
993 *
994 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
995 * MCL to just one card in a machine. We assume, at first, that all cards have
996 * these capabilities.
997 */
998int ext_bitlens = 1; // This is global
999#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1000#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1001#define PCICC_MIN_MOD_SIZE 64 // 512 bits
1002#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1003#define MAX_MOD_SIZE 256 // 2048 bits
1004
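Taken together, the constants above encode the routing rules from the bitlength table: requests below 128 bits (16 bytes) can only go to PCICA/CEX2A, a PCICC needs at least 512 bits and, without the MCL and the newer bitlengths enabled, at most 1024, and a PCIXCC_MCL2 needs at least 512. A standalone sketch of a check in that spirit; the helper name and return convention are made up for the illustration, and the real routing lives in select_device_type below:

#include <stdio.h>

/* Constants mirrored from the definitions above (sizes in bytes). */
#define PCIXCC_MIN_MOD_SIZE	 16	/* 128 bits */
#define OLD_PCIXCC_MIN_MOD_SIZE	 64	/* 512 bits */
#define PCICC_MIN_MOD_SIZE	 64	/* 512 bits */
#define OLD_PCICC_MAX_MOD_SIZE	128	/* 1024 bits */
#define MAX_MOD_SIZE		256	/* 2048 bits */

/* Hypothetical helper: can a PCICC take a request of this byte length? */
static int pcicc_can_handle(int bytelength, int ext_bitlens)
{
	if (bytelength < PCICC_MIN_MOD_SIZE || bytelength > MAX_MOD_SIZE)
		return 0;
	if (!ext_bitlens && bytelength > OLD_PCICC_MAX_MOD_SIZE)
		return 0;
	return 1;
}

int main(void)
{
	printf("96 bytes, no MCL:  %d\n", pcicc_can_handle(96, 0));  /* 1 */
	printf("192 bytes, no MCL: %d\n", pcicc_can_handle(192, 0)); /* 0 */
	printf("192 bytes, ext ok: %d\n", pcicc_can_handle(192, 1)); /* 1 */
	return 0;
}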
1005static inline int
1006select_device_type(int *dev_type_p, int bytelength)
1007{
1008 static int count = 0;
1009 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
1010 index_to_use;
1011 struct status *stat;
1012 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1013 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1014 (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
1015 (*dev_type_p != ANYDEV))
1016 return -1;
1017 if (*dev_type_p != ANYDEV) {
1018 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1019 if (stat->st_count >
1020 (stat->disabled_count + stat->user_disabled_count))
1021 return 0;
1022 return -1;
1023 }
1024
1025 /**
1026 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
1027 * speed.
1028 *
1029 * PCICA and CEX2A do NOT co-exist, so it would be either one or the
1030 * other present.
1031 */
1032 stat = &z90crypt.hdware_info->type_mask[PCICA];
1033 PCICA_avail = stat->st_count -
1034 (stat->disabled_count + stat->user_disabled_count);
1035 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
1036 PCIXCC_MCL3_avail = stat->st_count -
1037 (stat->disabled_count + stat->user_disabled_count);
1038 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1039 CEX2C_avail = stat->st_count -
1040 (stat->disabled_count + stat->user_disabled_count);
1041 stat = &z90crypt.hdware_info->type_mask[CEX2A];
1042 CEX2A_avail = stat->st_count -
1043 (stat->disabled_count + stat->user_disabled_count);
1044 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
1045 /**
1046 * bitlength is a factor: PCICA and CEX2A are the most capable,
1047 * even with the new MCL for PCIXCC.
1048 */
1049 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1050 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1051 if (PCICA_avail) {
1052 *dev_type_p = PCICA;
1053 return 0;
1054 }
1055 if (CEX2A_avail) {
1056 *dev_type_p = CEX2A;
1057 return 0;
1058 }
1059 return -1;
1060 }
1061
1062 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1063 CEX2C_avail + CEX2A_avail);
1064 if (index_to_use < PCICA_avail)
1065 *dev_type_p = PCICA;
1066 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1067 *dev_type_p = PCIXCC_MCL3;
1068 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
1069 CEX2C_avail))
1070 *dev_type_p = CEX2C;
1071 else
1072 *dev_type_p = CEX2A;
1073 count++;
1074 return 0;
1075 }
1076
1077 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1078 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
1079 return -1;
1080 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
1081 if (stat->st_count >
1082 (stat->disabled_count + stat->user_disabled_count)) {
1083 *dev_type_p = PCIXCC_MCL2;
1084 return 0;
1085 }
1086
1087 /**
1088 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1089 * (if we don't have the MCL applied and the newer bitlengths enabled)
1090 * cannot go to a PCICC
1091 */
1092 if ((bytelength < PCICC_MIN_MOD_SIZE) ||
1093 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
1094 return -1;
1095 }
1096 stat = &z90crypt.hdware_info->type_mask[PCICC];
1097 if (stat->st_count >
1098 (stat->disabled_count + stat->user_disabled_count)) {
1099 *dev_type_p = PCICC;
1100 return 0;
1101 }
1102
1103 return -1;
1104}
1105
1106/**
1107 * Try the selected number, then the selected type (can be ANYDEV)
1108 */
1109static inline int
1110select_device(int *dev_type_p, int *device_nr_p, int bytelength)
1111{
1112 int i, indx, devTp, low_count, low_indx;
1113 struct device_x *index_p;
1114 struct device *dev_ptr;
1115
1116 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1117 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1118 PDEBUG("trying index = %d\n", *device_nr_p);
1119 dev_ptr = z90crypt.device_p[*device_nr_p];
1120
1121 if (dev_ptr &&
1122 (dev_ptr->dev_stat != DEV_GONE) &&
1123 (dev_ptr->disabled == 0) &&
1124 (dev_ptr->user_disabled == 0)) {
1125 PDEBUG("selected by number, index = %d\n",
1126 *device_nr_p);
1127 *dev_type_p = dev_ptr->dev_type;
1128 return *device_nr_p;
1129 }
1130 }
1131 *device_nr_p = -1;
1132 PDEBUG("trying type = %d\n", *dev_type_p);
1133 devTp = *dev_type_p;
1134 if (select_device_type(&devTp, bytelength) == -1) {
1135 PDEBUG("failed to select by type\n");
1136 return -1;
1137 }
1138 PDEBUG("selected type = %d\n", devTp);
1139 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
1140 low_count = 0x0000FFFF;
1141 low_indx = -1;
1142 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1143 indx = index_p->device_index[i];
1144 dev_ptr = z90crypt.device_p[indx];
1145 if (dev_ptr &&
1146 (dev_ptr->dev_stat != DEV_GONE) &&
1147 (dev_ptr->disabled == 0) &&
1148 (dev_ptr->user_disabled == 0) &&
1149 (devTp == dev_ptr->dev_type) &&
1150 (low_count > dev_ptr->dev_caller_count)) {
1151 low_count = dev_ptr->dev_caller_count;
1152 low_indx = indx;
1153 }
1154 }
1155 *device_nr_p = low_indx;
1156 return low_indx;
1157}
1158
1159static inline int
1160send_to_crypto_device(struct work_element *we_p)
1161{
1162 struct caller *caller_p;
1163 struct device *device_p;
1164 int dev_nr;
1165 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
1166
1167 if (!we_p->requestptr)
1168 return SEN_FATAL_ERROR;
1169 caller_p = (struct caller *)we_p->requestptr;
1170 dev_nr = we_p->devindex;
1171 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
1172 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1173 return SEN_RETRY;
1174 else
1175 return SEN_NOT_AVAIL;
1176 }
1177 we_p->devindex = dev_nr;
1178 device_p = z90crypt.device_p[dev_nr];
1179 if (!device_p)
1180 return SEN_NOT_AVAIL;
1181 if (device_p->dev_type != we_p->devtype)
1182 return SEN_RETRY;
1183 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1184 return SEN_QUEUE_FULL;
1185 PDEBUG("device number prior to send: %d\n", dev_nr);
1186 switch (send_to_AP(dev_nr, z90crypt.cdx,
1187 caller_p->caller_dev_dep_req_l,
1188 caller_p->caller_dev_dep_req_p)) {
1189 case DEV_SEN_EXCEPTION:
1190 PRINTKC("Exception during send to device %d\n", dev_nr);
1191 z90crypt.terminating = 1;
1192 return SEN_FATAL_ERROR;
1193 case DEV_GONE:
1194 PRINTK("Device %d not available\n", dev_nr);
1195 remove_device(device_p);
1196 return SEN_NOT_AVAIL;
1197 case DEV_EMPTY:
1198 return SEN_NOT_AVAIL;
1199 case DEV_NO_WORK:
1200 return SEN_FATAL_ERROR;
1201 case DEV_BAD_MESSAGE:
1202 return SEN_USER_ERROR;
1203 case DEV_QUEUE_FULL:
1204 return SEN_QUEUE_FULL;
1205 default:
1206 case DEV_ONLINE:
1207 break;
1208 }
1209 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1210 device_p->dev_caller_count++;
1211 return 0;
1212}
1213
1214/**
1215 * Send puts the user's work on one of two queues:
1216 * the pending queue if the send was successful, or
1217 * the request queue if the send failed because the device was full or busy
1218 */
1219static inline int
1220z90crypt_send(struct work_element *we_p, const char *buf)
1221{
1222 int rv;
1223
1224 PDEBUG("PID %d\n", PID());
1225
1226 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1227 PDEBUG("PID %d tried to send more work but has outstanding "
1228 "work.\n", PID());
1229 return -EWORKPEND;
1230 }
1231 we_p->devindex = -1; // Reset device number
1232 spin_lock_irq(&queuespinlock);
1233 rv = send_to_crypto_device(we_p);
1234 switch (rv) {
1235 case 0:
1236 we_p->requestsent = jiffies;
1237 we_p->audit[0] |= FP_SENT;
1238 list_add_tail(&we_p->liste, &pending_list);
1239 ++pendingq_count;
1240 we_p->audit[0] |= FP_PENDING;
1241 break;
1242 case SEN_BUSY:
1243 case SEN_QUEUE_FULL:
1244 rv = 0;
1245 we_p->devindex = -1; // any device will do
1246 we_p->requestsent = jiffies;
1247 list_add_tail(&we_p->liste, &request_list);
1248 ++requestq_count;
1249 we_p->audit[0] |= FP_REQUEST;
1250 break;
1251 case SEN_RETRY:
1252 rv = -ERESTARTSYS;
1253 break;
1254 case SEN_NOT_AVAIL:
1255 PRINTK("*** No devices available.\n");
1256 rv = we_p->retcode = -ENODEV;
1257 we_p->status[0] |= STAT_FAILED;
1258 break;
1259 case REC_OPERAND_INV:
1260 case REC_OPERAND_SIZE:
1261 case REC_EVEN_MOD:
1262 case REC_INVALID_PAD:
1263 rv = we_p->retcode = -EINVAL;
1264 we_p->status[0] |= STAT_FAILED;
1265 break;
1266 default:
1267 we_p->retcode = rv;
1268 we_p->status[0] |= STAT_FAILED;
1269 break;
1270 }
1271 if (rv != -ERESTARTSYS)
1272 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1273 spin_unlock_irq(&queuespinlock);
1274 if (rv == 0)
1275 tasklet_schedule(&reader_tasklet);
1276 return rv;
1277}
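
As the comment above z90crypt_send() notes, a request either goes out immediately and lands on the pending queue, or is parked on the request queue when the device is busy or full. The sketch below shows only that two-way split in plain C; try_send(), the counters and the return codes are invented for illustration and ignore locking and wakeups entirely.

#include <stdio.h>

enum send_rc { SEND_OK, SEND_QUEUE_FULL, SEND_NO_DEVICE };

/* Counters standing in for pendingq_count / requestq_count. */
static int pending_count, request_count;

/* Stand-in for send_to_crypto_device(); alternates results for the demo. */
static enum send_rc try_send(int req_id)
{
	return (req_id % 2) ? SEND_QUEUE_FULL : SEND_OK;
}

/* Dispatch one request: pending queue on success, request queue when the
 * device was full, error otherwise -- the same split z90crypt_send() makes. */
static int dispatch(int req_id)
{
	switch (try_send(req_id)) {
	case SEND_OK:
		pending_count++;	/* now waiting for the device reply */
		return 0;
	case SEND_QUEUE_FULL:
		request_count++;	/* retried later by the reader task */
		return 0;
	default:
		return -1;		/* no device available */
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		dispatch(i);
	printf("pending=%d queued=%d\n", pending_count, request_count);
	return 0;
}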
1278
1279/**
1280 * process_results copies the user's work from kernel space.
1281 */
1282static inline int
1283z90crypt_process_results(struct work_element *we_p, char __user *buf)
1284{
1285 int rv;
1286
1287 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1288
1289 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1290 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1291
1292 rv = 0;
1293 if (!we_p->buffer) {
1294 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1295 we_p, PID());
1296 rv = -ENOBUFF;
1297 }
1298
1299 if (!rv)
1300 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1301 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1302 rv = -EFAULT;
1303 }
1304
1305 if (!rv)
1306 rv = we_p->retcode;
1307 if (!rv)
1308 if (we_p->resp_buff_size
1309 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1310 we_p->resp_buff_size))
1311 rv = -EFAULT;
1312
1313 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
1314 return rv;
1315}
1316
1317static unsigned char NULL_psmid[8] =
1318{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1319
1320/**
1321 * Used in device configuration functions
1322 */
1323#define MAX_RESET 90
1324
1325/**
1326 * This is used only for PCICC support
1327 */
1328static inline int
1329is_PKCS11_padded(unsigned char *buffer, int length)
1330{
1331 int i;
1332 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
1333 return 0;
1334 for (i = 2; i < length; i++)
1335 if (buffer[i] != 0xFF)
1336 break;
1337 if ((i < 10) || (i == length))
1338 return 0;
1339 if (buffer[i] != 0x00)
1340 return 0;
1341 return 1;
1342}
1343
1344/**
1345 * This is used only for PCICC support
1346 */
1347static inline int
1348is_PKCS12_padded(unsigned char *buffer, int length)
1349{
1350 int i;
1351 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
1352 return 0;
1353 for (i = 2; i < length; i++)
1354 if (buffer[i] == 0x00)
1355 break;
1356 if ((i < 10) || (i == length))
1357 return 0;
1358 if (buffer[i] != 0x00)
1359 return 0;
1360 return 1;
1361}
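
Both helpers recognize PKCS#1 v1.5 padding: block type 0x01 with 0xFF filler (is_PKCS11_padded) and block type 0x02 with non-zero filler (is_PKCS12_padded), each needing at least eight filler bytes followed by a 0x00 separator. A small standalone sketch of the same checks, written for readability rather than as a drop-in replacement:

#include <stdio.h>

/* Return 1 if buf looks like a PKCS#1 v1.5 block of the given type:
 * 0x00, type, >= 8 filler bytes, 0x00 separator, then the payload.
 * For type 1 the filler must be 0xFF; for type 2 it must be non-zero. */
static int looks_pkcs1_padded(const unsigned char *buf, int len, int type)
{
	int i;

	if (len < 11 || buf[0] != 0x00 || buf[1] != type)
		return 0;
	for (i = 2; i < len; i++) {
		if (type == 1 && buf[i] != 0xFF)
			break;
		if (type == 2 && buf[i] == 0x00)
			break;
	}
	/* need at least 8 filler bytes (i >= 10) and a 0x00 separator */
	return (i >= 10) && (i < len) && (buf[i] == 0x00);
}

int main(void)
{
	unsigned char type1[16] = { 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
				    0xFF, 0xFF, 0xFF, 0x00, 0xDE, 0xAD, 0xBE,
				    0xEF, 0x42 };

	printf("type1 padded: %d\n", looks_pkcs1_padded(type1, 16, 1)); /* 1 */
	printf("type2 padded: %d\n", looks_pkcs1_padded(type1, 16, 2)); /* 0 */
	return 0;
}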
1362
1363/**
1364 * builds struct caller and converts message from generic format to
1365 * device-dependent format
1366 * func is ICARSAMODEXPO or ICARSACRT
1367 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1368 */
1369static inline int
1370build_caller(struct work_element *we_p, short function)
1371{
1372 int rv;
1373 struct caller *caller_p = (struct caller *)we_p->requestptr;
1374
1375 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1376 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1377 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
1378 return SEN_NOT_AVAIL;
1379
1380 memcpy(caller_p->caller_id, we_p->caller_id,
1381 sizeof(caller_p->caller_id));
1382 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1383 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1384 caller_p->caller_buf_p = we_p->buffer;
1385 INIT_LIST_HEAD(&(caller_p->caller_liste));
1386
1387 rv = convert_request(we_p->buffer, we_p->funccode, function,
1388 z90crypt.cdx, we_p->devtype,
1389 &caller_p->caller_dev_dep_req_l,
1390 caller_p->caller_dev_dep_req_p);
1391 if (rv) {
1392 if (rv == SEN_NOT_AVAIL)
1393 			PDEBUG("request can't be processed on available hardware\n");
1394 else
1395 PRINTK("Error from convert_request: %d\n", rv);
1396 }
1397 else
1398 		memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id, 8);
1399 return rv;
1400}
1401
1402static inline void
1403unbuild_caller(struct device *device_p, struct caller *caller_p)
1404{
1405 if (!caller_p)
1406 return;
1407 if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1408 if (!list_empty(&caller_p->caller_liste)) {
1409 list_del_init(&caller_p->caller_liste);
1410 device_p->dev_caller_count--;
1411 }
1412 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
1413}
1414
1415static inline int
1416get_crypto_request_buffer(struct work_element *we_p)
1417{
1418 struct ica_rsa_modexpo *mex_p;
1419 struct ica_rsa_modexpo_crt *crt_p;
1420 unsigned char *temp_buffer;
1421 short function;
1422 int rv;
1423
1424 mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
1425 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1426
1427 PDEBUG("device type input = %d\n", we_p->devtype);
1428
1429 if (z90crypt.terminating)
1430 return REC_NO_RESPONSE;
1431 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1432 PRINTK("psmid zeroes\n");
1433 return SEN_FATAL_ERROR;
1434 }
1435 if (!we_p->buffer) {
1436 PRINTK("buffer pointer NULL\n");
1437 return SEN_USER_ERROR;
1438 }
1439 if (!we_p->requestptr) {
1440 PRINTK("caller pointer NULL\n");
1441 return SEN_USER_ERROR;
1442 }
1443
1444 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1445 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1446 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
1447 (we_p->devtype != ANYDEV)) {
1448 PRINTK("invalid device type\n");
1449 return SEN_USER_ERROR;
1450 }
1451
1452 if ((mex_p->inputdatalength < 1) ||
1453 (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1454 PRINTK("inputdatalength[%d] is not valid\n",
1455 mex_p->inputdatalength);
1456 return SEN_USER_ERROR;
1457 }
1458
1459 if (mex_p->outputdatalength < mex_p->inputdatalength) {
1460 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1461 mex_p->outputdatalength, mex_p->inputdatalength);
1462 return SEN_USER_ERROR;
1463 }
1464
1465 if (!mex_p->inputdata || !mex_p->outputdata) {
1466 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1467 		       mex_p->inputdata, mex_p->outputdata);
1468 return SEN_USER_ERROR;
1469 }
1470
1471 /**
1472 * As long as outputdatalength is big enough, we can set the
1473 * outputdatalength equal to the inputdatalength, since that is the
1474 * number of bytes we will copy in any case
1475 */
1476 mex_p->outputdatalength = mex_p->inputdatalength;
1477
1478 rv = 0;
1479 switch (we_p->funccode) {
1480 case ICARSAMODEXPO:
1481 if (!mex_p->b_key || !mex_p->n_modulus)
1482 rv = SEN_USER_ERROR;
1483 break;
1484 case ICARSACRT:
1485 if (!IS_EVEN(crt_p->inputdatalength)) {
1486 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1487 crt_p->inputdatalength);
1488 rv = SEN_USER_ERROR;
1489 break;
1490 }
1491 if (!crt_p->bp_key ||
1492 !crt_p->bq_key ||
1493 !crt_p->np_prime ||
1494 !crt_p->nq_prime ||
1495 !crt_p->u_mult_inv) {
1496 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1497 crt_p->bp_key, crt_p->bq_key,
1498 crt_p->np_prime, crt_p->nq_prime,
1499 crt_p->u_mult_inv);
1500 rv = SEN_USER_ERROR;
1501 }
1502 break;
1503 default:
1504 PRINTK("bad func = %d\n", we_p->funccode);
1505 rv = SEN_USER_ERROR;
1506 break;
1507 }
1508 if (rv != 0)
1509 return rv;
1510
1511 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
1512 return SEN_NOT_AVAIL;
1513
1514 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1515 sizeof(struct caller);
1516 if (copy_from_user(temp_buffer, mex_p->inputdata,
1517 mex_p->inputdatalength) != 0)
1518 return SEN_RELEASED;
1519
1520 function = PCI_FUNC_KEY_ENCRYPT;
1521 switch (we_p->devtype) {
1522 /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
1523 case PCICA:
1524 case CEX2A:
1525 function = PCI_FUNC_KEY_ENCRYPT;
1526 break;
1527 /**
1528 	 * PCIXCC_MCL2 does all Mod-Expo forms with a simple RSA mod-expo
1529 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1530 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
1531 * mod-expo operation
1532 */
1533 case PCIXCC_MCL2:
1534 if (we_p->funccode == ICARSAMODEXPO)
1535 function = PCI_FUNC_KEY_ENCRYPT;
1536 else
1537 function = PCI_FUNC_KEY_DECRYPT;
1538 break;
1539 case PCIXCC_MCL3:
1540 case CEX2C:
1541 if (we_p->funccode == ICARSAMODEXPO)
1542 function = PCI_FUNC_KEY_ENCRYPT;
1543 else
1544 function = PCI_FUNC_KEY_DECRYPT;
1545 break;
1546 /**
1547 * PCICC does everything as a PKCS-1.2 format request
1548 */
1549 case PCICC:
1550 		/* PCICC cannot handle input that is PKCS#1.1 padded */
1551 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1552 return SEN_NOT_AVAIL;
1553 }
1554 if (we_p->funccode == ICARSAMODEXPO) {
1555 if (is_PKCS12_padded(temp_buffer,
1556 mex_p->inputdatalength))
1557 function = PCI_FUNC_KEY_ENCRYPT;
1558 else
1559 function = PCI_FUNC_KEY_DECRYPT;
1560 } else
1561 /* all CRT forms are decrypts */
1562 function = PCI_FUNC_KEY_DECRYPT;
1563 break;
1564 }
1565 PDEBUG("function: %04x\n", function);
1566 rv = build_caller(we_p, function);
1567 PDEBUG("rv from build_caller = %d\n", rv);
1568 return rv;
1569}
1570
1571static inline int
1572z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1573 const char __user *buffer)
1574{
1575 int rv;
1576
1577 we_p->devindex = -1;
1578 if (funccode == ICARSAMODEXPO)
1579 we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1580 else
1581 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1582
1583 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1584 return -EFAULT;
1585
1586 we_p->audit[0] |= FP_COPYFROM;
1587 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1588 we_p->funccode = funccode;
1589 we_p->devtype = -1;
1590 we_p->audit[0] |= FP_BUFFREQ;
1591 rv = get_crypto_request_buffer(we_p);
1592 switch (rv) {
1593 case 0:
1594 we_p->audit[0] |= FP_BUFFGOT;
1595 break;
1596 case SEN_USER_ERROR:
1597 rv = -EINVAL;
1598 break;
1599 case SEN_QUEUE_FULL:
1600 rv = 0;
1601 break;
1602 case SEN_RELEASED:
1603 rv = -EFAULT;
1604 break;
1605 case REC_NO_RESPONSE:
1606 rv = -ENODEV;
1607 break;
1608 case SEN_NOT_AVAIL:
1609 case EGETBUFF:
1610 rv = -EGETBUFF;
1611 break;
1612 default:
1613 PRINTK("rv = %d\n", rv);
1614 rv = -EGETBUFF;
1615 break;
1616 }
1617 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1618 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
1619 return rv;
1620}
1621
1622static inline void
1623purge_work_element(struct work_element *we_p)
1624{
1625 struct list_head *lptr;
1626
1627 spin_lock_irq(&queuespinlock);
1628 list_for_each(lptr, &request_list) {
1629 if (lptr == &we_p->liste) {
1630 list_del_init(lptr);
1631 requestq_count--;
1632 break;
1633 }
1634 }
1635 list_for_each(lptr, &pending_list) {
1636 if (lptr == &we_p->liste) {
1637 list_del_init(lptr);
1638 pendingq_count--;
1639 break;
1640 }
1641 }
1642 spin_unlock_irq(&queuespinlock);
1643}
1644
1645/**
1646 * Build the request and send it.
1647 */
1648static inline int
1649z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1650 unsigned int cmd, unsigned long arg)
1651{
1652 struct work_element *we_p;
1653 int rv;
1654
1655 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1656 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1657 return rv;
1658 }
1659 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1660 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1661 if (!rv)
1662 if ((rv = z90crypt_send(we_p, (const char *)arg)))
1663 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
1664 if (!rv) {
1665 we_p->audit[0] |= FP_ASLEEP;
1666 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1667 we_p->audit[0] |= FP_AWAKE;
1668 rv = we_p->retcode;
1669 }
1670 if (!rv)
1671 rv = z90crypt_process_results(we_p, (char __user *)arg);
1672
1673 if ((we_p->status[0] & STAT_FAILED)) {
1674 switch (rv) {
1675 /**
1676 * EINVAL *after* receive is almost always a padding error or
1677 * length error issued by a coprocessor (not an accelerator).
1678 * We convert this return value to -EGETBUFF which should
1679 * trigger a fallback to software.
1680 */
1681 case -EINVAL:
1682 if ((we_p->devtype != PCICA) &&
1683 (we_p->devtype != CEX2A))
1684 rv = -EGETBUFF;
1685 break;
1686 case -ETIMEOUT:
1687 if (z90crypt.mask.st_count > 0)
1688 rv = -ERESTARTSYS; // retry with another
1689 else
1690 rv = -ENODEV; // no cards left
1691 /* fall through to clean up request queue */
1692 case -ERESTARTSYS:
1693 case -ERELEASED:
1694 switch (CHK_RDWRMASK(we_p->status[0])) {
1695 case STAT_WRITTEN:
1696 purge_work_element(we_p);
1697 break;
1698 case STAT_READPEND:
1699 case STAT_NOWORK:
1700 default:
1701 break;
1702 }
1703 break;
1704 default:
1705 we_p->status[0] ^= STAT_FAILED;
1706 break;
1707 }
1708 }
1709 free_page((long)we_p);
1710 return rv;
1711}
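
The error handling above treats -EINVAL reported after a receive from a coprocessor (as opposed to an accelerator) as a padding/length problem that software can still handle, remapping it to -EGETBUFF so the caller falls back. A toy sketch of just that translation step; the EGETBUFF value and the card enum here are made up for the example and are not the driver's definitions.

#include <stdio.h>
#include <errno.h>

#define EGETBUFF 105	/* hypothetical value for the sketch only */

enum card { ACCELERATOR, COPROCESSOR };

/* Post-receive error translation, mirroring the idea in z90crypt_rsa():
 * a coprocessor reporting -EINVAL usually means a padding or length issue
 * a software fallback can handle, so it is mapped to -EGETBUFF. */
static int translate_rc(int rc, enum card type)
{
	if (rc == -EINVAL && type == COPROCESSOR)
		return -EGETBUFF;
	return rc;
}

int main(void)
{
	printf("%d\n", translate_rc(-EINVAL, COPROCESSOR));	/* -EGETBUFF */
	printf("%d\n", translate_rc(-EINVAL, ACCELERATOR));	/* unchanged */
	return 0;
}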
1712
1713/**
1714 * This function is a little long, but it's really just one large switch
1715 * statement.
1716 */
1717static long
1718z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1719{
1720 struct priv_data *private_data_p = filp->private_data;
1721 unsigned char *status;
1722 unsigned char *qdepth;
1723 unsigned int *reqcnt;
1724 struct ica_z90_status *pstat;
1725 int ret, i, loopLim, tempstat;
1726 static int deprecated_msg_count1 = 0;
1727 static int deprecated_msg_count2 = 0;
1728
1729 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1730 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1731 cmd,
1732 !_IOC_DIR(cmd) ? "NO"
1733 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1734 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1735 : "WR")),
1736 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1737
1738 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1739 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1740 return -ENOTTY;
1741 }
1742
1743 ret = 0;
1744 switch (cmd) {
1745 case ICARSAMODEXPO:
1746 case ICARSACRT:
1747 if (quiesce_z90crypt) {
1748 ret = -EQUIESCE;
1749 break;
1750 }
1751 ret = -ENODEV; // Default if no devices
1752 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1753 (z90crypt.hdware_info->hdware_mask.disabled_count +
1754 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1755 for (i = 0; i < loopLim; i++) {
1756 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1757 if (ret != -ERESTARTSYS)
1758 break;
1759 }
1760 if (ret == -ERESTARTSYS)
1761 ret = -ENODEV;
1762 break;
1763
1764 case Z90STAT_TOTALCOUNT:
1765 tempstat = get_status_totalcount();
1766 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1767 ret = -EFAULT;
1768 break;
1769
1770 case Z90STAT_PCICACOUNT:
1771 tempstat = get_status_PCICAcount();
1772 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1773 ret = -EFAULT;
1774 break;
1775
1776 case Z90STAT_PCICCCOUNT:
1777 tempstat = get_status_PCICCcount();
1778 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1779 ret = -EFAULT;
1780 break;
1781
1782 case Z90STAT_PCIXCCMCL2COUNT:
1783 tempstat = get_status_PCIXCCMCL2count();
1784 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1785 ret = -EFAULT;
1786 break;
1787
1788 case Z90STAT_PCIXCCMCL3COUNT:
1789 tempstat = get_status_PCIXCCMCL3count();
1790 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1791 ret = -EFAULT;
1792 break;
1793
1794 case Z90STAT_CEX2CCOUNT:
1795 tempstat = get_status_CEX2Ccount();
1796 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1797 ret = -EFAULT;
1798 break;
1799
1800 case Z90STAT_CEX2ACOUNT:
1801 tempstat = get_status_CEX2Acount();
1802 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1803 ret = -EFAULT;
1804 break;
1805
1806 case Z90STAT_REQUESTQ_COUNT:
1807 tempstat = get_status_requestq_count();
1808 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1809 ret = -EFAULT;
1810 break;
1811
1812 case Z90STAT_PENDINGQ_COUNT:
1813 tempstat = get_status_pendingq_count();
1814 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1815 ret = -EFAULT;
1816 break;
1817
1818 case Z90STAT_TOTALOPEN_COUNT:
1819 tempstat = get_status_totalopen_count();
1820 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1821 ret = -EFAULT;
1822 break;
1823
1824 case Z90STAT_DOMAIN_INDEX:
1825 tempstat = get_status_domain_index();
1826 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1827 ret = -EFAULT;
1828 break;
1829
1830 case Z90STAT_STATUS_MASK:
1831 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1832 if (!status) {
1833 PRINTK("kmalloc for status failed!\n");
1834 ret = -ENOMEM;
1835 break;
1836 }
1837 get_status_status_mask(status);
1838 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1839 != 0)
1840 ret = -EFAULT;
1841 kfree(status);
1842 break;
1843
1844 case Z90STAT_QDEPTH_MASK:
1845 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1846 if (!qdepth) {
1847 PRINTK("kmalloc for qdepth failed!\n");
1848 ret = -ENOMEM;
1849 break;
1850 }
1851 get_status_qdepth_mask(qdepth);
1852 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1853 ret = -EFAULT;
1854 kfree(qdepth);
1855 break;
1856
1857 case Z90STAT_PERDEV_REQCNT:
1858 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1859 if (!reqcnt) {
1860 PRINTK("kmalloc for reqcnt failed!\n");
1861 ret = -ENOMEM;
1862 break;
1863 }
1864 get_status_perdevice_reqcnt(reqcnt);
1865 if (copy_to_user((char __user *) arg, reqcnt,
1866 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1867 ret = -EFAULT;
1868 kfree(reqcnt);
1869 break;
1870
1871 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1872 case ICAZ90STATUS:
1873 if (deprecated_msg_count1 < 20) {
1874 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1875 deprecated_msg_count1++;
1876 if (deprecated_msg_count1 == 20)
1877 PRINTK("No longer issuing messages related to "
1878 "deprecated call to ICAZ90STATUS.\n");
1879 }
1880
1881 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1882 if (!pstat) {
1883 PRINTK("kmalloc for pstat failed!\n");
1884 ret = -ENOMEM;
1885 break;
1886 }
1887
1888 pstat->totalcount = get_status_totalcount();
1889 pstat->leedslitecount = get_status_PCICAcount();
1890 pstat->leeds2count = get_status_PCICCcount();
1891 pstat->requestqWaitCount = get_status_requestq_count();
1892 pstat->pendingqWaitCount = get_status_pendingq_count();
1893 pstat->totalOpenCount = get_status_totalopen_count();
1894 pstat->cryptoDomain = get_status_domain_index();
1895 get_status_status_mask(pstat->status);
1896 get_status_qdepth_mask(pstat->qdepth);
1897
1898 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1899 sizeof(struct ica_z90_status)) != 0)
1900 ret = -EFAULT;
1901 kfree(pstat);
1902 break;
1903
1904 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1905 case Z90STAT_PCIXCCCOUNT:
1906 if (deprecated_msg_count2 < 20) {
1907 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1908 deprecated_msg_count2++;
1909 if (deprecated_msg_count2 == 20)
1910 PRINTK("No longer issuing messages about depre"
1911 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1912 }
1913
1914 tempstat = get_status_PCIXCCcount();
1915 		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1916 ret = -EFAULT;
1917 break;
1918
1919 case Z90QUIESCE:
1920 if (current->euid != 0) {
1921 PRINTK("QUIESCE fails: euid %d\n",
1922 current->euid);
1923 ret = -EACCES;
1924 } else {
1925 PRINTK("QUIESCE device from PID %d\n", PID());
1926 quiesce_z90crypt = 1;
1927 }
1928 break;
1929
1930 default:
1931 /* user passed an invalid IOCTL number */
1932 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
1933 ret = -ENOTTY;
1934 break;
1935 }
1936
1937 return ret;
1938}
1939
1940static inline int
1941sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1942{
1943 int hl, i;
1944
1945 hl = 0;
1946 for (i = 0; i < len; i++)
1947 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
1948 hl += sprintf(outaddr+hl, " ");
1949
1950 return hl;
1951}
1952
1953static inline int
1954sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1955{
1956 int hl, inl, c, cx;
1957
1958 hl = sprintf(outaddr, " ");
1959 inl = 0;
1960 for (c = 0; c < (len / 16); c++) {
1961 hl += sprintcl(outaddr+hl, addr+inl, 16);
1962 inl += 16;
1963 }
1964
1965 cx = len%16;
1966 if (cx) {
1967 hl += sprintcl(outaddr+hl, addr+inl, cx);
1968 inl += cx;
1969 }
1970
1971 hl += sprintf(outaddr+hl, "\n");
1972
1973 return hl;
1974}
1975
1976static inline int
1977sprinthx(unsigned char *title, unsigned char *outaddr,
1978 unsigned char *addr, unsigned int len)
1979{
1980 int hl, inl, r, rx;
1981
1982 hl = sprintf(outaddr, "\n%s\n", title);
1983 inl = 0;
1984 for (r = 0; r < (len / 64); r++) {
1985 hl += sprintrw(outaddr+hl, addr+inl, 64);
1986 inl += 64;
1987 }
1988 rx = len % 64;
1989 if (rx) {
1990 hl += sprintrw(outaddr+hl, addr+inl, rx);
1991 inl += rx;
1992 }
1993
1994 hl += sprintf(outaddr+hl, "\n");
1995
1996 return hl;
1997}
1998
1999static inline int
2000sprinthx4(unsigned char *title, unsigned char *outaddr,
2001 unsigned int *array, unsigned int len)
2002{
2003 int hl, r;
2004
2005 hl = sprintf(outaddr, "\n%s\n", title);
2006
2007 for (r = 0; r < len; r++) {
2008 if ((r % 8) == 0)
2009 hl += sprintf(outaddr+hl, " ");
2010 hl += sprintf(outaddr+hl, "%08X ", array[r]);
2011 if ((r % 8) == 7)
2012 hl += sprintf(outaddr+hl, "\n");
2013 }
2014
2015 hl += sprintf(outaddr+hl, "\n");
2016
2017 return hl;
2018}
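
sprintcl(), sprintrw() and sprinthx() render the /proc status masks as a titled hex dump: one hex value per byte, clusters of 16 bytes, 64 bytes per row. A compact standalone version with the same shape, using snprintf for bounds checking; the buffer sizes and sample data are arbitrary.

#include <stdio.h>

/* Write a titled hex dump in the shape sprinthx() produces:
 * one hex value per byte, groups of 16 separated by a space, 64 per row. */
static int dump_hex(char *out, size_t outlen, const char *title,
		    const unsigned char *data, unsigned int len)
{
	unsigned int i;
	int n = snprintf(out, outlen, "\n%s\n    ", title);

	for (i = 0; i < len; i++) {
		n += snprintf(out + n, outlen - n, "%x", data[i]);
		if (i + 1 == len)
			break;
		if ((i + 1) % 64 == 0)
			n += snprintf(out + n, outlen - n, "\n    ");
		else if ((i + 1) % 16 == 0)
			n += snprintf(out + n, outlen - n, " ");
	}
	n += snprintf(out + n, outlen - n, "\n");
	return n;
}

int main(void)
{
	unsigned char mask[64];
	char buf[512];
	unsigned int i;

	for (i = 0; i < sizeof(mask); i++)
		mask[i] = i % 7;	/* fake device-type mask, values 0..6 */
	dump_hex(buf, sizeof(buf), "Online devices", mask, sizeof(mask));
	fputs(buf, stdout);
	return 0;
}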
2019
2020static int
2021z90crypt_status(char *resp_buff, char **start, off_t offset,
2022 int count, int *eof, void *data)
2023{
2024 unsigned char *workarea;
2025 int len;
2026
2027 /* resp_buff is a page. Use the right half for a work area */
2028 workarea = resp_buff+2000;
2029 len = 0;
2030 len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
2031 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
2032 len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
2033 get_status_domain_index());
2034 len += sprintf(resp_buff+len, "Total device count: %d\n",
2035 get_status_totalcount());
2036 len += sprintf(resp_buff+len, "PCICA count: %d\n",
2037 get_status_PCICAcount());
2038 len += sprintf(resp_buff+len, "PCICC count: %d\n",
2039 get_status_PCICCcount());
2040 len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
2041 get_status_PCIXCCMCL2count());
2042 len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
2043 get_status_PCIXCCMCL3count());
2044 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2045 get_status_CEX2Ccount());
2046 len += sprintf(resp_buff+len, "CEX2A count: %d\n",
2047 get_status_CEX2Acount());
2048 len += sprintf(resp_buff+len, "requestq count: %d\n",
2049 get_status_requestq_count());
2050 len += sprintf(resp_buff+len, "pendingq count: %d\n",
2051 get_status_pendingq_count());
2052 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2053 get_status_totalopen_count());
2054 len += sprinthx(
2055 "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
2056 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
2057 resp_buff+len,
2058 get_status_status_mask(workarea),
2059 Z90CRYPT_NUM_APS);
2060 len += sprinthx("Waiting work element counts",
2061 resp_buff+len,
2062 get_status_qdepth_mask(workarea),
2063 Z90CRYPT_NUM_APS);
2064 len += sprinthx4(
2065 "Per-device successfully completed request counts",
2066 resp_buff+len,
2067 get_status_perdevice_reqcnt((unsigned int *)workarea),
2068 Z90CRYPT_NUM_APS);
2069 *eof = 1;
2070 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
2071 return len;
2072}
2073
2074static inline void
2075disable_card(int card_index)
2076{
2077 struct device *devp;
2078
2079 devp = LONG2DEVPTR(card_index);
2080 if (!devp || devp->user_disabled)
2081 return;
2082 devp->user_disabled = 1;
2083 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2084 if (devp->dev_type == -1)
2085 return;
2086 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2087}
2088
2089static inline void
2090enable_card(int card_index)
2091{
2092 struct device *devp;
2093
2094 devp = LONG2DEVPTR(card_index);
2095 if (!devp || !devp->user_disabled)
2096 return;
2097 devp->user_disabled = 0;
2098 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2099 if (devp->dev_type == -1)
2100 return;
2101 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2102}
2103
2104static int
2105z90crypt_status_write(struct file *file, const char __user *buffer,
2106 unsigned long count, void *data)
2107{
2108 int j, eol;
2109 unsigned char *lbuf, *ptr;
2110 unsigned int local_count;
2111
2112#define LBUFSIZE 1200
2113	if (count <= 0)
2114		return 0;
2115
2116	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2117	if (!lbuf) {
2118		PRINTK("kmalloc failed!\n");
2119		return 0;
2120	}
2121
2122 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2123
2124 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2125 kfree(lbuf);
2126 return -EFAULT;
2127 }
2128
2129 lbuf[local_count] = '\0';
2130
2131 ptr = strstr(lbuf, "Online devices");
2132 if (ptr == 0) {
2133 PRINTK("Unable to parse data (missing \"Online devices\")\n");
2134 kfree(lbuf);
2135 return count;
2136 }
2137
2138 ptr = strstr(ptr, "\n");
2139 if (ptr == 0) {
2140 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
2141 kfree(lbuf);
2142 return count;
2143 }
2144 ptr++;
2145
2146 if (strstr(ptr, "Waiting work element counts") == NULL) {
2147 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
2148 kfree(lbuf);
2149 return count;
2150 }
2151
2152 j = 0;
2153 eol = 0;
2154 while ((j < 64) && (*ptr != '\0')) {
2155 switch (*ptr) {
2156 case '\t':
2157 case ' ':
2158 break;
2159 case '\n':
2160 default:
2161 eol = 1;
2162 break;
2163 case '0': // no device
2164 case '1': // PCICA
2165 case '2': // PCICC
2166 case '3': // PCIXCC_MCL2
2167 case '4': // PCIXCC_MCL3
2168 case '5': // CEX2C
2169 case '6': // CEX2A
2170 j++;
2171 break;
2172 case 'd':
2173 case 'D':
2174 disable_card(j);
2175 j++;
2176 break;
2177 case 'e':
2178 case 'E':
2179 enable_card(j);
2180 j++;
2181 break;
2182 }
2183 if (eol)
2184 break;
2185 ptr++;
2186 }
2187
2188 kfree(lbuf);
2189 return count;
2190}
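
z90crypt_status_write() interprets text written back to the /proc entry: after the "Online devices" heading it walks up to 64 mask characters, where 'd'/'D' disables and 'e'/'E' enables the card at that position and digits simply advance. A simplified standalone parser of that little protocol; the disable_card()/enable_card() stubs here just print, and there is no /proc plumbing.

#include <stdio.h>

static void disable_card(int idx) { printf("disable card %d\n", idx); }
static void enable_card(int idx)  { printf("enable card %d\n", idx); }

/* Walk a device-mask line: digits keep the current state, 'd'/'D' disables
 * and 'e'/'E' enables the card at that position; anything else ends the
 * scan.  Mirrors the character loop in z90crypt_status_write(). */
static void apply_mask_line(const char *line)
{
	int idx = 0;

	for (; *line && idx < 64; line++) {
		if (*line == ' ' || *line == '\t')
			continue;
		if (*line >= '0' && *line <= '6') {
			idx++;
		} else if (*line == 'd' || *line == 'D') {
			disable_card(idx++);
		} else if (*line == 'e' || *line == 'E') {
			enable_card(idx++);
		} else {
			break;		/* newline or junk: stop */
		}
	}
}

int main(void)
{
	/* disables card 2, enables card 4 */
	apply_mask_line("  1 0 d 3 E 5\n");
	return 0;
}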
2191
2192/**
2193 * Functions that run under a timer, with no process id
2194 *
2195 * The task functions:
2196 * z90crypt_reader_task
2197 * helper_send_work
2198 * helper_handle_work_element
2199 * helper_receive_rc
2200 * z90crypt_config_task
2201 * z90crypt_cleanup_task
2202 *
2203 * Helper functions:
2204 * z90crypt_schedule_reader_timer
2205 * z90crypt_schedule_reader_task
2206 * z90crypt_schedule_config_task
2207 * z90crypt_schedule_cleanup_task
2208 */
2209static inline int
2210receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2211 unsigned char *buff, unsigned char __user **dest_p_p)
2212{
2213 int dv, rv;
2214 struct device *dev_ptr;
2215 struct caller *caller_p;
2216 struct ica_rsa_modexpo *icaMsg_p;
2217 struct list_head *ptr, *tptr;
2218
2219 memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2220
2221 if (z90crypt.terminating)
2222 return REC_FATAL_ERROR;
2223
2224 caller_p = 0;
2225 dev_ptr = z90crypt.device_p[index];
2226 rv = 0;
2227 do {
2228 if (!dev_ptr || dev_ptr->disabled) {
2229 rv = REC_NO_WORK; // a disabled device can't return work
2230 break;
2231 }
2232 if (dev_ptr->dev_self_x != index) {
2233 PRINTKC("Corrupt dev ptr\n");
2234 z90crypt.terminating = 1;
2235 rv = REC_FATAL_ERROR;
2236 break;
2237 }
2238 if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2239 dv = DEV_REC_EXCEPTION;
2240 PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2241 dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2242 } else {
2243 PDEBUG("Dequeue called for device %d\n", index);
2244 dv = receive_from_AP(index, z90crypt.cdx,
2245 dev_ptr->dev_resp_l,
2246 dev_ptr->dev_resp_p, psmid);
2247 }
2248 switch (dv) {
2249 case DEV_REC_EXCEPTION:
2250 rv = REC_FATAL_ERROR;
2251 z90crypt.terminating = 1;
2252 PRINTKC("Exception in receive from device %d\n",
2253 index);
2254 break;
2255 case DEV_ONLINE:
2256 rv = 0;
2257 break;
2258 case DEV_EMPTY:
2259 rv = REC_EMPTY;
2260 break;
2261 case DEV_NO_WORK:
2262 rv = REC_NO_WORK;
2263 break;
2264 case DEV_BAD_MESSAGE:
2265 case DEV_GONE:
2266 case REC_HARDWAR_ERR:
2267 default:
2268 rv = REC_NO_RESPONSE;
2269 break;
2270 }
2271 if (rv)
2272 break;
2273 if (dev_ptr->dev_caller_count <= 0) {
2274 rv = REC_USER_GONE;
2275 break;
2276 }
2277
2278 list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2279 caller_p = list_entry(ptr, struct caller, caller_liste);
2280 if (!memcmp(caller_p->caller_id, psmid,
2281 sizeof(caller_p->caller_id))) {
2282 if (!list_empty(&caller_p->caller_liste)) {
2283 list_del_init(ptr);
2284 dev_ptr->dev_caller_count--;
2285 break;
2286 }
2287 }
2288 caller_p = 0;
2289 }
2290 if (!caller_p) {
2291 PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
2292 "%02X%02X%02X in device list\n",
2293 psmid[0], psmid[1], psmid[2], psmid[3],
2294 psmid[4], psmid[5], psmid[6], psmid[7]);
2295 rv = REC_USER_GONE;
2296 break;
2297 }
2298
2299 PDEBUG("caller_p after successful receive: %p\n", caller_p);
2300 rv = convert_response(dev_ptr->dev_resp_p,
2301 caller_p->caller_buf_p, buff_len_p, buff);
2302 switch (rv) {
2303 case REC_USE_PCICA:
2304 break;
2305 case REC_OPERAND_INV:
2306 case REC_OPERAND_SIZE:
2307 case REC_EVEN_MOD:
2308 case REC_INVALID_PAD:
2309 PDEBUG("device %d: 'user error' %d\n", index, rv);
2310 break;
2311 case WRONG_DEVICE_TYPE:
2312 case REC_HARDWAR_ERR:
2313 case REC_BAD_MESSAGE:
2314 PRINTKW("device %d: hardware error %d\n", index, rv);
2315 rv = REC_NO_RESPONSE;
2316 break;
2317 default:
2318 PDEBUG("device %d: rv = %d\n", index, rv);
2319 break;
2320 }
2321 } while (0);
2322
2323 switch (rv) {
2324 case 0:
2325 PDEBUG("Successful receive from device %d\n", index);
2326 icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2327 *dest_p_p = icaMsg_p->outputdata;
2328 if (*buff_len_p == 0)
2329 PRINTK("Zero *buff_len_p\n");
2330 break;
2331 case REC_NO_RESPONSE:
2332 PRINTKW("Removing device %d from availability\n", index);
2333 remove_device(dev_ptr);
2334 break;
2335 }
2336
2337 if (caller_p)
2338 unbuild_caller(dev_ptr, caller_p);
2339
2340 return rv;
2341}
2342
2343static inline void
2344helper_send_work(int index)
2345{
2346 struct work_element *rq_p;
2347 int rv;
2348
2349 if (list_empty(&request_list))
2350 return;
2351 requestq_count--;
2352 rq_p = list_entry(request_list.next, struct work_element, liste);
2353 list_del_init(&rq_p->liste);
2354 rq_p->audit[1] |= FP_REMREQUEST;
2355 if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2356 rq_p->devindex = SHRT2LONG(index);
2357 rv = send_to_crypto_device(rq_p);
2358 if (rv == 0) {
2359 rq_p->requestsent = jiffies;
2360 rq_p->audit[0] |= FP_SENT;
2361 list_add_tail(&rq_p->liste, &pending_list);
2362 ++pendingq_count;
2363 rq_p->audit[0] |= FP_PENDING;
2364 } else {
2365 switch (rv) {
2366 case REC_OPERAND_INV:
2367 case REC_OPERAND_SIZE:
2368 case REC_EVEN_MOD:
2369 case REC_INVALID_PAD:
2370 rq_p->retcode = -EINVAL;
2371 break;
2372 case SEN_NOT_AVAIL:
2373 case SEN_RETRY:
2374 case REC_NO_RESPONSE:
2375 default:
2376 if (z90crypt.mask.st_count > 1)
2377 rq_p->retcode =
2378 -ERESTARTSYS;
2379 else
2380 rq_p->retcode = -ENODEV;
2381 break;
2382 }
2383 rq_p->status[0] |= STAT_FAILED;
2384 rq_p->audit[1] |= FP_AWAKENING;
2385 atomic_set(&rq_p->alarmrung, 1);
2386 wake_up(&rq_p->waitq);
2387 }
2388 } else {
2389 if (z90crypt.mask.st_count > 1)
2390 rq_p->retcode = -ERESTARTSYS;
2391 else
2392 rq_p->retcode = -ENODEV;
2393 rq_p->status[0] |= STAT_FAILED;
2394 rq_p->audit[1] |= FP_AWAKENING;
2395 atomic_set(&rq_p->alarmrung, 1);
2396 wake_up(&rq_p->waitq);
2397 }
2398}
2399
2400static inline void
2401helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2402 int buff_len, unsigned char *buff,
2403 unsigned char __user *resp_addr)
2404{
2405 struct work_element *pq_p;
2406 struct list_head *lptr, *tptr;
2407
2408 pq_p = 0;
2409 list_for_each_safe(lptr, tptr, &pending_list) {
2410 pq_p = list_entry(lptr, struct work_element, liste);
2411 if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2412 list_del_init(lptr);
2413 pendingq_count--;
2414 pq_p->audit[1] |= FP_NOTPENDING;
2415 break;
2416 }
2417 pq_p = 0;
2418 }
2419
2420 if (!pq_p) {
2421 PRINTK("device %d has work but no caller exists on pending Q\n",
2422 SHRT2LONG(index));
2423 return;
2424 }
2425
2426 switch (rc) {
2427 case 0:
2428 pq_p->resp_buff_size = buff_len;
2429 pq_p->audit[1] |= FP_RESPSIZESET;
2430 if (buff_len) {
2431 pq_p->resp_addr = resp_addr;
2432 pq_p->audit[1] |= FP_RESPADDRCOPIED;
2433 memcpy(pq_p->resp_buff, buff, buff_len);
2434 pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2435 }
2436 break;
2437 case REC_OPERAND_INV:
2438 case REC_OPERAND_SIZE:
2439 case REC_EVEN_MOD:
2440 case REC_INVALID_PAD:
2441 PDEBUG("-EINVAL after application error %d\n", rc);
2442 pq_p->retcode = -EINVAL;
2443 pq_p->status[0] |= STAT_FAILED;
2444 break;
2445 case REC_USE_PCICA:
2446 pq_p->retcode = -ERESTARTSYS;
2447 pq_p->status[0] |= STAT_FAILED;
2448 break;
2449 case REC_NO_RESPONSE:
2450 default:
2451 if (z90crypt.mask.st_count > 1)
2452 pq_p->retcode = -ERESTARTSYS;
2453 else
2454 pq_p->retcode = -ENODEV;
2455 pq_p->status[0] |= STAT_FAILED;
2456 break;
2457 }
2458 if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2459 pq_p->audit[1] |= FP_AWAKENING;
2460 atomic_set(&pq_p->alarmrung, 1);
2461 wake_up(&pq_p->waitq);
2462 }
2463}
2464
2465/**
2466 * return TRUE if the work element should be removed from the queue
2467 */
2468static inline int
2469helper_receive_rc(int index, int *rc_p)
2470{
2471 switch (*rc_p) {
2472 case 0:
2473 case REC_OPERAND_INV:
2474 case REC_OPERAND_SIZE:
2475 case REC_EVEN_MOD:
2476 case REC_INVALID_PAD:
2477 case REC_USE_PCICA:
2478 break;
2479
2480 case REC_BUSY:
2481 case REC_NO_WORK:
2482 case REC_EMPTY:
2483 case REC_RETRY_DEV:
2484 case REC_FATAL_ERROR:
2485 return 0;
2486
2487 case REC_NO_RESPONSE:
2488 break;
2489
2490 default:
2491 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2492 *rc_p, SHRT2LONG(index));
2493 *rc_p = REC_NO_RESPONSE;
2494 break;
2495 }
2496 return 1;
2497}
2498
2499static inline void
2500z90crypt_schedule_reader_timer(void)
2501{
2502 if (timer_pending(&reader_timer))
2503 return;
2504 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2505 PRINTK("Timer pending while modifying reader timer\n");
2506}
2507
2508static void
2509z90crypt_reader_task(unsigned long ptr)
2510{
2511 int workavail, index, rc, buff_len;
2512 unsigned char psmid[8];
2513 unsigned char __user *resp_addr;
2514 static unsigned char buff[1024];
2515
2516 /**
2517 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2518 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
2519 * loop, there is no work remaining on the queues.
2520 */
2521 resp_addr = 0;
2522 workavail = 2;
2523 buff_len = 0;
2524 while (workavail) {
2525 workavail--;
2526 rc = 0;
2527 spin_lock_irq(&queuespinlock);
2528 memset(buff, 0x00, sizeof(buff));
2529
2530 /* Dequeue once from each device in round robin. */
2531 for (index = 0; index < z90crypt.mask.st_count; index++) {
2532 PDEBUG("About to receive.\n");
2533 rc = receive_from_crypto_device(SHRT2LONG(index),
2534 psmid,
2535 &buff_len,
2536 buff,
2537 &resp_addr);
2538 PDEBUG("Dequeued: rc = %d.\n", rc);
2539
2540 if (helper_receive_rc(index, &rc)) {
2541 if (rc != REC_NO_RESPONSE) {
2542 helper_send_work(index);
2543 workavail = 2;
2544 }
2545
2546 helper_handle_work_element(index, psmid, rc,
2547 buff_len, buff,
2548 resp_addr);
2549 }
2550
2551 if (rc == REC_FATAL_ERROR)
2552 PRINTKW("REC_FATAL_ERROR from device %d!\n",
2553 SHRT2LONG(index));
2554 }
2555 spin_unlock_irq(&queuespinlock);
2556 }
2557
2558 if (pendingq_count + requestq_count)
2559 z90crypt_schedule_reader_timer();
2560}
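
The workavail counter gives the reader task a "drain until idle" rule: any pass that dequeued something resets it to 2, so the loop only exits after two consecutive empty sweeps of all devices. The same pattern in isolation, with a stubbed poll_devices() in place of real hardware:

#include <stdio.h>

/* Pretend device poll: returns the number of items dequeued this pass. */
static int poll_devices(void)
{
	static int work_left = 5;

	if (work_left > 0) {
		work_left--;
		return 1;
	}
	return 0;
}

int main(void)
{
	int idle_budget = 2;	/* same role as workavail in the reader task */
	int passes = 0;

	while (idle_budget) {
		idle_budget--;
		if (poll_devices() > 0)
			idle_budget = 2;	/* productive pass: keep going */
		passes++;
	}
	/* 5 productive passes + 2 final empty passes */
	printf("total passes: %d\n", passes);
	return 0;
}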
2561
2562static inline void
2563z90crypt_schedule_config_task(unsigned int expiration)
2564{
2565 if (timer_pending(&config_timer))
2566 return;
2567 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2568 PRINTK("Timer pending while modifying config timer\n");
2569}
2570
2571static void
2572z90crypt_config_task(unsigned long ptr)
2573{
2574 int rc;
2575
2576 PDEBUG("jiffies %ld\n", jiffies);
2577
2578 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2579 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2580 /* If return was fatal, don't bother reconfiguring */
2581 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2582 z90crypt_schedule_config_task(CONFIGTIME);
2583}
2584
2585static inline void
2586z90crypt_schedule_cleanup_task(void)
2587{
2588 if (timer_pending(&cleanup_timer))
2589 return;
2590 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2591 PRINTK("Timer pending while modifying cleanup timer\n");
2592}
2593
2594static inline void
2595helper_drain_queues(void)
2596{
2597 struct work_element *pq_p;
2598 struct list_head *lptr, *tptr;
2599
2600 list_for_each_safe(lptr, tptr, &pending_list) {
2601 pq_p = list_entry(lptr, struct work_element, liste);
2602 pq_p->retcode = -ENODEV;
2603 pq_p->status[0] |= STAT_FAILED;
2604 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2605 (struct caller *)pq_p->requestptr);
2606 list_del_init(lptr);
2607 pendingq_count--;
2608 pq_p->audit[1] |= FP_NOTPENDING;
2609 pq_p->audit[1] |= FP_AWAKENING;
2610 atomic_set(&pq_p->alarmrung, 1);
2611 wake_up(&pq_p->waitq);
2612 }
2613
2614 list_for_each_safe(lptr, tptr, &request_list) {
2615 pq_p = list_entry(lptr, struct work_element, liste);
2616 pq_p->retcode = -ENODEV;
2617 pq_p->status[0] |= STAT_FAILED;
2618 list_del_init(lptr);
2619 requestq_count--;
2620 pq_p->audit[1] |= FP_REMREQUEST;
2621 pq_p->audit[1] |= FP_AWAKENING;
2622 atomic_set(&pq_p->alarmrung, 1);
2623 wake_up(&pq_p->waitq);
2624 }
2625}
2626
2627static inline void
2628helper_timeout_requests(void)
2629{
2630 struct work_element *pq_p;
2631 struct list_head *lptr, *tptr;
2632 long timelimit;
2633
2634 timelimit = jiffies - (CLEANUPTIME * HZ);
2635 /* The list is in strict chronological order */
2636 list_for_each_safe(lptr, tptr, &pending_list) {
2637 pq_p = list_entry(lptr, struct work_element, liste);
2638 if (pq_p->requestsent >= timelimit)
2639 break;
2640 PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2641 ((struct caller *)pq_p->requestptr)->caller_id[0],
2642 ((struct caller *)pq_p->requestptr)->caller_id[1],
2643 ((struct caller *)pq_p->requestptr)->caller_id[2],
2644 ((struct caller *)pq_p->requestptr)->caller_id[3],
2645 ((struct caller *)pq_p->requestptr)->caller_id[4],
2646 ((struct caller *)pq_p->requestptr)->caller_id[5],
2647 ((struct caller *)pq_p->requestptr)->caller_id[6],
2648 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2649 pq_p->retcode = -ETIMEOUT;
2650 pq_p->status[0] |= STAT_FAILED;
2651 /* get this off any caller queue it may be on */
2652 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2653 (struct caller *) pq_p->requestptr);
2654 list_del_init(lptr);
2655 pendingq_count--;
2656 pq_p->audit[1] |= FP_TIMEDOUT;
2657 pq_p->audit[1] |= FP_NOTPENDING;
2658 pq_p->audit[1] |= FP_AWAKENING;
2659 atomic_set(&pq_p->alarmrung, 1);
2660 wake_up(&pq_p->waitq);
2661 }
2662
2663 /**
2664 * If pending count is zero, items left on the request queue may
2665 * never be processed.
2666 */
2667 if (pendingq_count <= 0) {
2668 list_for_each_safe(lptr, tptr, &request_list) {
2669 pq_p = list_entry(lptr, struct work_element, liste);
2670 if (pq_p->requestsent >= timelimit)
2671 break;
2672 PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2673 ((struct caller *)pq_p->requestptr)->caller_id[0],
2674 ((struct caller *)pq_p->requestptr)->caller_id[1],
2675 ((struct caller *)pq_p->requestptr)->caller_id[2],
2676 ((struct caller *)pq_p->requestptr)->caller_id[3],
2677 ((struct caller *)pq_p->requestptr)->caller_id[4],
2678 ((struct caller *)pq_p->requestptr)->caller_id[5],
2679 ((struct caller *)pq_p->requestptr)->caller_id[6],
2680 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2681 pq_p->retcode = -ETIMEOUT;
2682 pq_p->status[0] |= STAT_FAILED;
2683 list_del_init(lptr);
2684 requestq_count--;
2685 pq_p->audit[1] |= FP_TIMEDOUT;
2686 pq_p->audit[1] |= FP_REMREQUEST;
2687 pq_p->audit[1] |= FP_AWAKENING;
2688 atomic_set(&pq_p->alarmrung, 1);
2689 wake_up(&pq_p->waitq);
2690 }
2691 }
2692}
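
Because both queues are kept in strict submission order, the cleanup pass can stop at the first entry younger than the cutoff instead of scanning everything. A userspace sketch of that early-exit purge over a sorted array standing in for the list walk; the struct and field names are invented for the example.

#include <stdio.h>

struct fake_req {
	int id;
	long sent;	/* submission time, monotonically increasing */
};

/* Mark every request older than `cutoff` as timed out.  Entries are in
 * submission order, so the scan stops at the first one that is still young
 * enough -- the same early break helper_timeout_requests() relies on. */
static int purge_expired(struct fake_req *reqs, int n, long cutoff)
{
	int purged = 0, i;

	for (i = 0; i < n; i++) {
		if (reqs[i].sent >= cutoff)
			break;
		printf("purging request %d (sent at %ld)\n",
		       reqs[i].id, reqs[i].sent);
		purged++;
	}
	return purged;
}

int main(void)
{
	struct fake_req reqs[] = {
		{ 1, 100 }, { 2, 150 }, { 3, 400 }, { 4, 450 },
	};

	/* cutoff 300: requests 1 and 2 time out, 3 and 4 survive */
	printf("purged %d requests\n", purge_expired(reqs, 4, 300));
	return 0;
}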
2693
2694static void
2695z90crypt_cleanup_task(unsigned long ptr)
2696{
2697 PDEBUG("jiffies %ld\n", jiffies);
2698 spin_lock_irq(&queuespinlock);
2699 if (z90crypt.mask.st_count <= 0) // no devices!
2700 helper_drain_queues();
2701 else
2702 helper_timeout_requests();
2703 spin_unlock_irq(&queuespinlock);
2704 z90crypt_schedule_cleanup_task();
2705}
2706
2707static void
2708z90crypt_schedule_reader_task(unsigned long ptr)
2709{
2710 tasklet_schedule(&reader_tasklet);
2711}
2712
2713/**
2714 * Lowlevel Functions:
2715 *
2716 * create_z90crypt: creates and initializes basic data structures
2717 * refresh_z90crypt: re-initializes basic data structures
2718 * find_crypto_devices: returns a count and mask of hardware status
2719 * create_crypto_device: builds the descriptor for a device
2720 * destroy_crypto_device: unallocates the descriptor for a device
2721 * destroy_z90crypt: drains all work, unallocates structs
2722 */
2723
2724/**
2725 * build the z90crypt root structure using the given domain index
2726 */
2727static int
2728create_z90crypt(int *cdx_p)
2729{
2730 struct hdware_block *hdware_blk_p;
2731
2732 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2733 z90crypt.domain_established = 0;
2734 z90crypt.len = sizeof(struct z90crypt);
2735 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2736 z90crypt.cdx = *cdx_p;
2737
2738 hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2739 if (!hdware_blk_p) {
2740 PDEBUG("kmalloc for hardware block failed\n");
2741 return ENOMEM;
2742 }
2743 z90crypt.hdware_info = hdware_blk_p;
2744
2745 return 0;
2746}
2747
2748static inline int
2749helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2750{
2751 enum hdstat hd_stat;
2752 int q_depth, dev_type;
2753 int indx, chkdom, numdomains;
2754
2755 q_depth = dev_type = numdomains = 0;
2756 for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2757 for (indx = 0; indx < z90crypt.max_count; indx++) {
2758 hd_stat = HD_NOT_THERE;
2759 numdomains = 0;
2760 for (chkdom = 0; chkdom <= 15; chkdom++) {
2761 hd_stat = query_online(indx, chkdom, MAX_RESET,
2762 &q_depth, &dev_type);
2763 if (hd_stat == HD_TSQ_EXCEPTION) {
2764 z90crypt.terminating = 1;
2765 PRINTKC("exception taken!\n");
2766 break;
2767 }
2768 if (hd_stat == HD_ONLINE) {
2769 cdx_array[numdomains++] = chkdom;
2770 if (*cdx_p == chkdom) {
2771 *correct_cdx_found = 1;
2772 break;
2773 }
2774 }
2775 }
2776 if ((*correct_cdx_found == 1) || (numdomains != 0))
2777 break;
2778 if (z90crypt.terminating)
2779 break;
2780 }
2781 return numdomains;
2782}
2783
2784static inline int
2785probe_crypto_domain(int *cdx_p)
2786{
2787 int cdx_array[16];
2788 char cdx_array_text[53], temp[5];
2789 int correct_cdx_found, numdomains;
2790
2791 correct_cdx_found = 0;
2792 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2793
2794 if (z90crypt.terminating)
2795 return TSQ_FATAL_ERROR;
2796
2797 if (correct_cdx_found)
2798 return 0;
2799
2800 if (numdomains == 0) {
2801 PRINTKW("Unable to find crypto domain: No devices found\n");
2802 return Z90C_NO_DEVICES;
2803 }
2804
2805 if (numdomains == 1) {
2806 if (*cdx_p == -1) {
2807 *cdx_p = cdx_array[0];
2808 return 0;
2809 }
2810 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2811 *cdx_p, cdx_array[0]);
2812 return Z90C_INCORRECT_DOMAIN;
2813 }
2814
2815 numdomains--;
2816 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2817 while (numdomains) {
2818 numdomains--;
2819 sprintf(temp, ", %d", cdx_array[numdomains]);
2820 strcat(cdx_array_text, temp);
2821 }
2822
2823 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2824 *cdx_p, cdx_array_text);
2825 return Z90C_AMBIGUOUS_DOMAIN;
2826}
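
probe_crypto_domain() boils the scan down to three outcomes: nothing online, exactly one domain online (adopted if none was requested), or several online (an error unless the requested domain is among them). A compact sketch of that decision; the return codes and the "-1 means any" convention below are illustrative only.

#include <stdio.h>

enum probe_rc { PROBE_OK, PROBE_NO_DEVICES, PROBE_WRONG_DOMAIN,
		PROBE_AMBIGUOUS };

/* Decide which crypto domain to use, given the domains found online and
 * the one requested (-1 meaning "any").  Mirrors the outcome logic of
 * probe_crypto_domain(); the enum values here are invented. */
static enum probe_rc resolve_domain(const int *found, int nfound,
				    int *requested)
{
	int i;

	if (nfound == 0)
		return PROBE_NO_DEVICES;
	for (i = 0; i < nfound; i++)
		if (found[i] == *requested)
			return PROBE_OK;	/* requested domain is online */
	if (nfound == 1) {
		if (*requested == -1) {
			*requested = found[0];	/* adopt the only domain */
			return PROBE_OK;
		}
		return PROBE_WRONG_DOMAIN;
	}
	return PROBE_AMBIGUOUS;
}

int main(void)
{
	int domains[] = { 6 };
	int want = -1;

	printf("rc=%d, domain=%d\n",
	       resolve_domain(domains, 1, &want), want);	/* rc=0, domain=6 */
	return 0;
}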
2827
2828static int
2829refresh_z90crypt(int *cdx_p)
2830{
2831 int i, j, indx, rv;
2832 static struct status local_mask;
2833 struct device *devPtr;
2834 unsigned char oldStat, newStat;
2835 int return_unchanged;
2836
2837 if (z90crypt.len != sizeof(z90crypt))
2838 return ENOTINIT;
2839 if (z90crypt.terminating)
2840 return TSQ_FATAL_ERROR;
2841 rv = 0;
2842 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2843 !z90crypt.domain_established) {
2844 rv = probe_crypto_domain(cdx_p);
2845 if (z90crypt.terminating)
2846 return TSQ_FATAL_ERROR;
2847 if (rv == Z90C_NO_DEVICES)
2848 return 0; // try later
2849 if (rv)
2850 return rv;
2851 z90crypt.cdx = *cdx_p;
2852 z90crypt.domain_established = 1;
2853 }
2854 rv = find_crypto_devices(&local_mask);
2855 if (rv) {
2856 PRINTK("find crypto devices returned %d\n", rv);
2857 return rv;
2858 }
2859 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2860 sizeof(struct status))) {
2861 return_unchanged = 1;
2862 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2863 /**
2864 * Check for disabled cards. If any device is marked
2865 * disabled, destroy it.
2866 */
2867 for (j = 0;
2868 j < z90crypt.hdware_info->type_mask[i].st_count;
2869 j++) {
2870 indx = z90crypt.hdware_info->type_x_addr[i].
2871 device_index[j];
2872 devPtr = z90crypt.device_p[indx];
2873 if (devPtr && devPtr->disabled) {
2874 local_mask.st_mask[indx] = HD_NOT_THERE;
2875 return_unchanged = 0;
2876 }
2877 }
2878 }
2879 if (return_unchanged == 1)
2880 return 0;
2881 }
2882
2883 spin_lock_irq(&queuespinlock);
2884 for (i = 0; i < z90crypt.max_count; i++) {
2885 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2886 newStat = local_mask.st_mask[i];
2887 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2888 destroy_crypto_device(i);
2889 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2890 rv = create_crypto_device(i);
2891 if (rv >= REC_FATAL_ERROR)
2892 return rv;
2893 if (rv != 0) {
2894 local_mask.st_mask[i] = HD_NOT_THERE;
2895 local_mask.st_count--;
2896 }
2897 }
2898 }
2899 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2900 sizeof(local_mask.st_mask));
2901 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2902 z90crypt.hdware_info->hdware_mask.disabled_count =
2903 local_mask.disabled_count;
2904 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2905 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2906 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2907 &(z90crypt.hdware_info->type_x_addr[i]));
2908 spin_unlock_irq(&queuespinlock);
2909
2910 return rv;
2911}
2912
2913static int
2914find_crypto_devices(struct status *deviceMask)
2915{
2916 int i, q_depth, dev_type;
2917 enum hdstat hd_stat;
2918
2919 deviceMask->st_count = 0;
2920 deviceMask->disabled_count = 0;
2921 deviceMask->user_disabled_count = 0;
2922
2923 for (i = 0; i < z90crypt.max_count; i++) {
2924 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
2925 &dev_type);
2926 if (hd_stat == HD_TSQ_EXCEPTION) {
2927 z90crypt.terminating = 1;
2928 PRINTKC("Exception during probe for crypto devices\n");
2929 return TSQ_FATAL_ERROR;
2930 }
2931 deviceMask->st_mask[i] = hd_stat;
2932 if (hd_stat == HD_ONLINE) {
2933 PDEBUG("Got an online crypto!: %d\n", i);
2934 PDEBUG("Got a queue depth of %d\n", q_depth);
2935 PDEBUG("Got a device type of %d\n", dev_type);
2936 if (q_depth <= 0)
2937 return TSQ_FATAL_ERROR;
2938 deviceMask->st_count++;
2939 z90crypt.q_depth_array[i] = q_depth;
2940 z90crypt.dev_type_array[i] = dev_type;
2941 }
2942 }
2943
2944 return 0;
2945}
2946
2947static int
2948refresh_index_array(struct status *status_str, struct device_x *index_array)
2949{
2950 int i, count;
2951 enum devstat stat;
2952
2953 i = -1;
2954 count = 0;
2955 do {
2956 stat = status_str->st_mask[++i];
2957 if (stat == DEV_ONLINE)
2958 index_array->device_index[count++] = i;
2959 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
2960
2961 return count;
2962}
2963
2964static int
2965create_crypto_device(int index)
2966{
2967 int rv, devstat, total_size;
2968 struct device *dev_ptr;
2969 struct status *type_str_p;
2970 int deviceType;
2971
2972 dev_ptr = z90crypt.device_p[index];
2973 if (!dev_ptr) {
2974 total_size = sizeof(struct device) +
2975 z90crypt.q_depth_array[index] * sizeof(int);
2976
2977 dev_ptr = kzalloc(total_size, GFP_ATOMIC);
2978 if (!dev_ptr) {
2979 PRINTK("kmalloc device %d failed\n", index);
2980 return ENOMEM;
2981 }
2982 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
2983 if (!dev_ptr->dev_resp_p) {
2984 kfree(dev_ptr);
2985 PRINTK("kmalloc device %d rec buffer failed\n", index);
2986 return ENOMEM;
2987 }
2988 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
2989 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
2990 }
2991
2992 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
2993 if (devstat == DEV_RSQ_EXCEPTION) {
2994 PRINTK("exception during reset device %d\n", index);
2995 kfree(dev_ptr->dev_resp_p);
2996 kfree(dev_ptr);
2997 return RSQ_FATAL_ERROR;
2998 }
2999 if (devstat == DEV_ONLINE) {
3000 dev_ptr->dev_self_x = index;
3001 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3002 if (dev_ptr->dev_type == NILDEV) {
3003 rv = probe_device_type(dev_ptr);
3004 if (rv) {
3005 PRINTK("rv = %d from probe_device_type %d\n",
3006 rv, index);
3007 kfree(dev_ptr->dev_resp_p);
3008 kfree(dev_ptr);
3009 return rv;
3010 }
3011 }
3012 if (dev_ptr->dev_type == PCIXCC_UNK) {
3013 rv = probe_PCIXCC_type(dev_ptr);
3014 if (rv) {
3015 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
3016 rv, index);
3017 kfree(dev_ptr->dev_resp_p);
3018 kfree(dev_ptr);
3019 return rv;
3020 }
3021 }
3022 deviceType = dev_ptr->dev_type;
3023 z90crypt.dev_type_array[index] = deviceType;
3024 if (deviceType == PCICA)
3025 z90crypt.hdware_info->device_type_array[index] = 1;
3026 else if (deviceType == PCICC)
3027 z90crypt.hdware_info->device_type_array[index] = 2;
3028 else if (deviceType == PCIXCC_MCL2)
3029 z90crypt.hdware_info->device_type_array[index] = 3;
3030 else if (deviceType == PCIXCC_MCL3)
3031 z90crypt.hdware_info->device_type_array[index] = 4;
3032 else if (deviceType == CEX2C)
3033 z90crypt.hdware_info->device_type_array[index] = 5;
3034 else if (deviceType == CEX2A)
3035 z90crypt.hdware_info->device_type_array[index] = 6;
3036 else // No idea how this would happen.
3037 z90crypt.hdware_info->device_type_array[index] = -1;
3038 }
3039
3040 /**
3041 * 'q_depth' returned by the hardware is one less than
3042 * the actual depth
3043 */
3044 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3045 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3046 dev_ptr->dev_stat = devstat;
3047 dev_ptr->disabled = 0;
3048 z90crypt.device_p[index] = dev_ptr;
3049
3050 if (devstat == DEV_ONLINE) {
3051 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3052 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3053 z90crypt.mask.st_count++;
3054 }
3055 deviceType = dev_ptr->dev_type;
3056 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3057 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3058 type_str_p->st_mask[index] = DEV_ONLINE;
3059 type_str_p->st_count++;
3060 }
3061 }
3062
3063 return 0;
3064}
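
The if/else chain above maps each device type to the single digit that later shows up in the /proc "Online devices" mask (1=PCICA ... 6=CEX2A). The same mapping expressed as a lookup table; the enumerator values below are invented for the sketch and are not the driver's own constants.

#include <stdio.h>

enum dev_type { PCICA, PCICC, PCIXCC_MCL2, PCIXCC_MCL3, CEX2C, CEX2A,
		NUM_DEV_TYPES };

/* Digit reported in the /proc status mask for each device type. */
static const int proc_digit[NUM_DEV_TYPES] = {
	[PCICA]       = 1,
	[PCICC]       = 2,
	[PCIXCC_MCL2] = 3,
	[PCIXCC_MCL3] = 4,
	[CEX2C]       = 5,
	[CEX2A]       = 6,
};

int main(void)
{
	printf("CEX2A shows up as %d\n", proc_digit[CEX2A]);	/* 6 */
	return 0;
}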
3065
3066static int
3067destroy_crypto_device(int index)
3068{
3069 struct device *dev_ptr;
3070 int t, disabledFlag;
3071
3072 dev_ptr = z90crypt.device_p[index];
3073
3074 /* remember device type; get rid of device struct */
3075 if (dev_ptr) {
3076 disabledFlag = dev_ptr->disabled;
3077 t = dev_ptr->dev_type;
3078 kfree(dev_ptr->dev_resp_p);
3079 kfree(dev_ptr);
3080 } else {
3081 disabledFlag = 0;
3082 t = -1;
3083 }
3084 z90crypt.device_p[index] = 0;
3085
3086 /* if the type is valid, remove the device from the type_mask */
3087 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3088 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3089 z90crypt.hdware_info->type_mask[t].st_count--;
3090 if (disabledFlag == 1)
3091 z90crypt.hdware_info->type_mask[t].disabled_count--;
3092 }
3093 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3094 z90crypt.mask.st_mask[index] = DEV_GONE;
3095 z90crypt.mask.st_count--;
3096 }
3097 z90crypt.hdware_info->device_type_array[index] = 0;
3098
3099 return 0;
3100}
3101
3102static void
3103destroy_z90crypt(void)
3104{
3105 int i;
3106
3107 for (i = 0; i < z90crypt.max_count; i++)
3108 if (z90crypt.device_p[i])
3109 destroy_crypto_device(i);
3110 kfree(z90crypt.hdware_info);
3111 memset((void *)&z90crypt, 0, sizeof(z90crypt));
3112}
3113
3114static unsigned char static_testmsg[384] = {
31150x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
31160x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
31170x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
31180x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
31190x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31200x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31210x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
31220x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31230xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31240x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31250x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31260x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31270x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
31280x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
31290x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
31300x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
31310x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
31320x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
31330x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
31340x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
31350x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
31360xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
31370x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
31380x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
3139};
3140
3141static int
3142probe_device_type(struct device *devPtr)
3143{
3144 int rv, dv, i, index, length;
3145 unsigned char psmid[8];
3146 static unsigned char loc_testmsg[sizeof(static_testmsg)];
3147
3148 index = devPtr->dev_self_x;
3149 rv = 0;
3150 do {
3151 memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3152 length = sizeof(static_testmsg) - 24;
3153 /* the -24 allows for the header */
3154 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3155 if (dv) {
3156 PDEBUG("dv returned by send during probe: %d\n", dv);
3157 if (dv == DEV_SEN_EXCEPTION) {
3158 rv = SEN_FATAL_ERROR;
3159 PRINTKC("exception in send to AP %d\n", index);
3160 break;
3161 }
3162 PDEBUG("return value from send_to_AP: %d\n", rv);
3163 switch (dv) {
3164 case DEV_GONE:
3165 PDEBUG("dev %d not available\n", index);
3166 rv = SEN_NOT_AVAIL;
3167 break;
3168 case DEV_ONLINE:
3169 rv = 0;
3170 break;
3171 case DEV_EMPTY:
3172 rv = SEN_NOT_AVAIL;
3173 break;
3174 case DEV_NO_WORK:
3175 rv = SEN_FATAL_ERROR;
3176 break;
3177 case DEV_BAD_MESSAGE:
3178 rv = SEN_USER_ERROR;
3179 break;
3180 case DEV_QUEUE_FULL:
3181 rv = SEN_QUEUE_FULL;
3182 break;
3183 default:
3184 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3185 rv = SEN_NOT_AVAIL;
3186 break;
3187 }
3188 }
3189
3190 if (rv)
3191 break;
3192
3193 for (i = 0; i < 6; i++) {
3194 mdelay(300);
3195 dv = receive_from_AP(index, z90crypt.cdx,
3196 devPtr->dev_resp_l,
3197 devPtr->dev_resp_p, psmid);
3198 PDEBUG("dv returned by DQ = %d\n", dv);
3199 if (dv == DEV_REC_EXCEPTION) {
3200 rv = REC_FATAL_ERROR;
3201 PRINTKC("exception in dequeue %d\n",
3202 index);
3203 break;
3204 }
3205 switch (dv) {
3206 case DEV_ONLINE:
3207 rv = 0;
3208 break;
3209 case DEV_EMPTY:
3210 rv = REC_EMPTY;
3211 break;
3212 case DEV_NO_WORK:
3213 rv = REC_NO_WORK;
3214 break;
3215 case DEV_BAD_MESSAGE:
3216 case DEV_GONE:
3217 default:
3218 rv = REC_NO_RESPONSE;
3219 break;
3220 }
3221 if ((rv != 0) && (rv != REC_NO_WORK))
3222 break;
3223 if (rv == 0)
3224 break;
3225 }
3226 if (rv)
3227 break;
3228 rv = (devPtr->dev_resp_p[0] == 0x00) &&
3229 (devPtr->dev_resp_p[1] == 0x86);
3230 if (rv)
3231 devPtr->dev_type = PCICC;
3232 else
3233 devPtr->dev_type = PCICA;
3234 rv = 0;
3235 } while (0);
3236 /* In a general error case, the card is not marked online */
3237 return rv;
3238}
3239
3240static unsigned char MCL3_testmsg[] = {
32410x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
32420x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32430x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32440x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32450x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
32460x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
32470x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
32480x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
32490x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32500x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32510x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32520x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32530x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32540x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32550x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32560x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32570x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32580x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32590x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32600x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32610x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
32620x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
32630x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
32640xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
32650x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
32660x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
32670x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
32680x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
32690x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
32700xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
32710xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
32720x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
32730x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
32740xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
32750x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
3276};
3277
3278static int
3279probe_PCIXCC_type(struct device *devPtr)
3280{
3281 int rv, dv, i, index, length;
3282 unsigned char psmid[8];
3283 static unsigned char loc_testmsg[548];
3284 struct CPRBX *cprbx_p;
3285
3286 index = devPtr->dev_self_x;
3287 rv = 0;
3288 do {
3289 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3290 length = sizeof(MCL3_testmsg) - 0x0C;
3291 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3292 if (dv) {
3293 PDEBUG("dv returned = %d\n", dv);
3294 if (dv == DEV_SEN_EXCEPTION) {
3295 rv = SEN_FATAL_ERROR;
3296 PRINTKC("exception in send to AP %d\n", index);
3297 break;
3298 }
3299			PDEBUG("return value from send_to_AP: %d\n", dv);
3300 switch (dv) {
3301 case DEV_GONE:
3302 PDEBUG("dev %d not available\n", index);
3303 rv = SEN_NOT_AVAIL;
3304 break;
3305 case DEV_ONLINE:
3306 rv = 0;
3307 break;
3308 case DEV_EMPTY:
3309 rv = SEN_NOT_AVAIL;
3310 break;
3311 case DEV_NO_WORK:
3312 rv = SEN_FATAL_ERROR;
3313 break;
3314 case DEV_BAD_MESSAGE:
3315 rv = SEN_USER_ERROR;
3316 break;
3317 case DEV_QUEUE_FULL:
3318 rv = SEN_QUEUE_FULL;
3319 break;
3320 default:
3321 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3322 rv = SEN_NOT_AVAIL;
3323 break;
3324 }
3325 }
3326
3327 if (rv)
3328 break;
3329
3330 for (i = 0; i < 6; i++) {
3331 mdelay(300);
3332 dv = receive_from_AP(index, z90crypt.cdx,
3333 devPtr->dev_resp_l,
3334 devPtr->dev_resp_p, psmid);
3335 PDEBUG("dv returned by DQ = %d\n", dv);
3336 if (dv == DEV_REC_EXCEPTION) {
3337 rv = REC_FATAL_ERROR;
3338 PRINTKC("exception in dequeue %d\n",
3339 index);
3340 break;
3341 }
3342 switch (dv) {
3343 case DEV_ONLINE:
3344 rv = 0;
3345 break;
3346 case DEV_EMPTY:
3347 rv = REC_EMPTY;
3348 break;
3349 case DEV_NO_WORK:
3350 rv = REC_NO_WORK;
3351 break;
3352 case DEV_BAD_MESSAGE:
3353 case DEV_GONE:
3354 default:
3355 rv = REC_NO_RESPONSE;
3356 break;
3357 }
3358 if ((rv != 0) && (rv != REC_NO_WORK))
3359 break;
3360 if (rv == 0)
3361 break;
3362 }
3363 if (rv)
3364 break;
3365 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3366 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3367 devPtr->dev_type = PCIXCC_MCL2;
3368 PDEBUG("device %d is MCL2\n", index);
3369 } else {
3370 devPtr->dev_type = PCIXCC_MCL3;
3371 PDEBUG("device %d is MCL3\n", index);
3372 }
3373 } while (0);
3374 /* In a general error case, the card is not marked online */
3375 return rv;
3376}
3377
3378module_init(z90crypt_init_module);
3379module_exit(z90crypt_cleanup_module);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
new file mode 100644
index 000000000000..1edc10a7a6f2
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -0,0 +1,1091 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_api.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 * Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
13 * Ralph Wuerthner <rwuerthn@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/miscdevice.h>
34#include <linux/fs.h>
35#include <linux/proc_fs.h>
36#include <linux/compat.h>
37#include <asm/atomic.h>
38#include <asm/uaccess.h>
39
40#include "zcrypt_api.h"
41
42/**
43 * Module description.
44 */
45MODULE_AUTHOR("IBM Corporation");
46MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
47 "Copyright 2001, 2006 IBM Corporation");
48MODULE_LICENSE("GPL");
49
50static DEFINE_SPINLOCK(zcrypt_device_lock);
51static LIST_HEAD(zcrypt_device_list);
52static int zcrypt_device_count = 0;
53static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
54
55/**
56 * Device attributes common for all crypto devices.
57 */
58static ssize_t zcrypt_type_show(struct device *dev,
59 struct device_attribute *attr, char *buf)
60{
61 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
62 return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
63}
64
65static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
66
67static ssize_t zcrypt_online_show(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
71 return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
72}
73
74static ssize_t zcrypt_online_store(struct device *dev,
75 struct device_attribute *attr,
76 const char *buf, size_t count)
77{
78 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
79 int online;
80
81 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
82 return -EINVAL;
83 zdev->online = online;
84 if (!online)
85 ap_flush_queue(zdev->ap_dev);
86 return count;
87}
88
89static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
90
91static struct attribute * zcrypt_device_attrs[] = {
92 &dev_attr_type.attr,
93 &dev_attr_online.attr,
94 NULL,
95};
96
97static struct attribute_group zcrypt_device_attr_group = {
98 .attrs = zcrypt_device_attrs,
99};
100
101/**
102 * Move the device towards the head of the device list.
103 * Must be called while holding the zcrypt device list lock.
104 * Note: cards with speed_rating of 0 are kept at the end of the list.
105 */
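/*
 * Illustration (numbers below are hypothetical, not taken from any
 * driver): the sort key is (request_count + 1) * speed_rating and
 * smaller values sort toward the head of the list, which is scanned
 * first by the request routing functions. An idle card rated 3000
 * scores (0 + 1) * 3000 = 3000 and is therefore preferred over a
 * card rated 1000 that already has three requests queued:
 * (3 + 1) * 1000 = 4000.
 */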
106static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
107{
108 struct zcrypt_device *tmp;
109 struct list_head *l;
110
111 if (zdev->speed_rating == 0)
112 return;
113 for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
114 tmp = list_entry(l, struct zcrypt_device, list);
115 if ((tmp->request_count + 1) * tmp->speed_rating <=
116 (zdev->request_count + 1) * zdev->speed_rating &&
117 tmp->speed_rating != 0)
118 break;
119 }
120 if (l == zdev->list.prev)
121 return;
122 /* Move zdev behind l */
123 list_del(&zdev->list);
124 list_add(&zdev->list, l);
125}
126
127/**
128 * Move the device towards the tail of the device list.
129 * Must be called while holding the zcrypt device list lock.
130 * Note: cards with speed_rating of 0 are kept at the end of the list.
131 */
132static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
133{
134 struct zcrypt_device *tmp;
135 struct list_head *l;
136
137 if (zdev->speed_rating == 0)
138 return;
139 for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
140 tmp = list_entry(l, struct zcrypt_device, list);
141 if ((tmp->request_count + 1) * tmp->speed_rating >
142 (zdev->request_count + 1) * zdev->speed_rating ||
143 tmp->speed_rating == 0)
144 break;
145 }
146 if (l == zdev->list.next)
147 return;
148 /* Move zdev before l */
149 list_del(&zdev->list);
150 list_add_tail(&zdev->list, l);
151}
152
153static void zcrypt_device_release(struct kref *kref)
154{
155 struct zcrypt_device *zdev =
156 container_of(kref, struct zcrypt_device, refcount);
157 zcrypt_device_free(zdev);
158}
159
160void zcrypt_device_get(struct zcrypt_device *zdev)
161{
162 kref_get(&zdev->refcount);
163}
164EXPORT_SYMBOL(zcrypt_device_get);
165
166int zcrypt_device_put(struct zcrypt_device *zdev)
167{
168 return kref_put(&zdev->refcount, zcrypt_device_release);
169}
170EXPORT_SYMBOL(zcrypt_device_put);
171
172struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
173{
174 struct zcrypt_device *zdev;
175
176 zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
177 if (!zdev)
178 return NULL;
179 zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
180 if (!zdev->reply.message)
181 goto out_free;
182 zdev->reply.length = max_response_size;
183 spin_lock_init(&zdev->lock);
184 INIT_LIST_HEAD(&zdev->list);
185 return zdev;
186
187out_free:
188 kfree(zdev);
189 return NULL;
190}
191EXPORT_SYMBOL(zcrypt_device_alloc);
192
193void zcrypt_device_free(struct zcrypt_device *zdev)
194{
195 kfree(zdev->reply.message);
196 kfree(zdev);
197}
198EXPORT_SYMBOL(zcrypt_device_free);
199
200/**
201 * Register a crypto device.
202 */
203int zcrypt_device_register(struct zcrypt_device *zdev)
204{
205 int rc;
206
207 rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
208 &zcrypt_device_attr_group);
209 if (rc)
210 goto out;
211 get_device(&zdev->ap_dev->device);
212 kref_init(&zdev->refcount);
213 spin_lock_bh(&zcrypt_device_lock);
214 zdev->online = 1; /* New devices are online by default. */
215 list_add_tail(&zdev->list, &zcrypt_device_list);
216 __zcrypt_increase_preference(zdev);
217 zcrypt_device_count++;
218 spin_unlock_bh(&zcrypt_device_lock);
219out:
220 return rc;
221}
222EXPORT_SYMBOL(zcrypt_device_register);
223
224/**
225 * Unregister a crypto device.
226 */
227void zcrypt_device_unregister(struct zcrypt_device *zdev)
228{
229 spin_lock_bh(&zcrypt_device_lock);
230 zcrypt_device_count--;
231 list_del_init(&zdev->list);
232 spin_unlock_bh(&zcrypt_device_lock);
233 sysfs_remove_group(&zdev->ap_dev->device.kobj,
234 &zcrypt_device_attr_group);
235 put_device(&zdev->ap_dev->device);
236 zcrypt_device_put(zdev);
237}
238EXPORT_SYMBOL(zcrypt_device_unregister);
239
240/**
241 * zcrypt_read is not supported beyond zcrypt 1.3.1
242 */
243static ssize_t zcrypt_read(struct file *filp, char __user *buf,
244 size_t count, loff_t *f_pos)
245{
246 return -EPERM;
247}
248
249/**
250 * Write is not allowed
251 */
252static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
253 size_t count, loff_t *f_pos)
254{
255 return -EPERM;
256}
257
258/**
259 * Device open/close functions to count number of users.
260 */
261static int zcrypt_open(struct inode *inode, struct file *filp)
262{
263 atomic_inc(&zcrypt_open_count);
264 return 0;
265}
266
267static int zcrypt_release(struct inode *inode, struct file *filp)
268{
269 atomic_dec(&zcrypt_open_count);
270 return 0;
271}
272
273/**
274 * zcrypt ioctls.
275 */
276static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
277{
278 struct zcrypt_device *zdev;
279 int rc;
280
281 if (mex->outputdatalength < mex->inputdatalength)
282 return -EINVAL;
283 /**
284 * As long as outputdatalength is big enough, we can set the
285 * outputdatalength equal to the inputdatalength, since that is the
286 * number of bytes we will copy in any case
287 */
288 mex->outputdatalength = mex->inputdatalength;
289
290 spin_lock_bh(&zcrypt_device_lock);
291 list_for_each_entry(zdev, &zcrypt_device_list, list) {
292 if (!zdev->online ||
293 !zdev->ops->rsa_modexpo ||
294 zdev->min_mod_size > mex->inputdatalength ||
295 zdev->max_mod_size < mex->inputdatalength)
296 continue;
297 zcrypt_device_get(zdev);
298 get_device(&zdev->ap_dev->device);
299 zdev->request_count++;
300 __zcrypt_decrease_preference(zdev);
301 spin_unlock_bh(&zcrypt_device_lock);
302 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
303 rc = zdev->ops->rsa_modexpo(zdev, mex);
304 module_put(zdev->ap_dev->drv->driver.owner);
305 }
306 else
307 rc = -EAGAIN;
308 spin_lock_bh(&zcrypt_device_lock);
309 zdev->request_count--;
310 __zcrypt_increase_preference(zdev);
311 put_device(&zdev->ap_dev->device);
312 zcrypt_device_put(zdev);
313 spin_unlock_bh(&zcrypt_device_lock);
314 return rc;
315 }
316 spin_unlock_bh(&zcrypt_device_lock);
317 return -ENODEV;
318}
319
320static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
321{
322 struct zcrypt_device *zdev;
323 unsigned long long z1, z2, z3;
324 int rc, copied;
325
326 if (crt->outputdatalength < crt->inputdatalength ||
327 (crt->inputdatalength & 1))
328 return -EINVAL;
329 /**
330 * As long as outputdatalength is big enough, we can set the
331 * outputdatalength equal to the inputdatalength, since that is the
332 * number of bytes we will copy in any case
333 */
334 crt->outputdatalength = crt->inputdatalength;
335
336 copied = 0;
337 restart:
338 spin_lock_bh(&zcrypt_device_lock);
339 list_for_each_entry(zdev, &zcrypt_device_list, list) {
340 if (!zdev->online ||
341 !zdev->ops->rsa_modexpo_crt ||
342 zdev->min_mod_size > crt->inputdatalength ||
343 zdev->max_mod_size < crt->inputdatalength)
344 continue;
345 if (zdev->short_crt && crt->inputdatalength > 240) {
346 /**
347 * Check inputdata for leading zeros for cards
348 * that can't handle np_prime, bp_key, or
349 * u_mult_inv > 128 bytes.
350 */
351 if (copied == 0) {
352 int len;
353 spin_unlock_bh(&zcrypt_device_lock);
354 /* len is max 256 / 2 - 120 = 8 */
355 len = crt->inputdatalength / 2 - 120;
356 z1 = z2 = z3 = 0;
357 if (copy_from_user(&z1, crt->np_prime, len) ||
358 copy_from_user(&z2, crt->bp_key, len) ||
359 copy_from_user(&z3, crt->u_mult_inv, len))
360 return -EFAULT;
361 copied = 1;
362 /**
363 * We have to restart device lookup -
364 * the device list may have changed by now.
365 */
366 goto restart;
367 }
368 if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
369 /* The device can't handle this request. */
370 continue;
371 }
372 zcrypt_device_get(zdev);
373 get_device(&zdev->ap_dev->device);
374 zdev->request_count++;
375 __zcrypt_decrease_preference(zdev);
376 spin_unlock_bh(&zcrypt_device_lock);
377 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
378 rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
379 module_put(zdev->ap_dev->drv->driver.owner);
380 }
381 else
382 rc = -EAGAIN;
383 spin_lock_bh(&zcrypt_device_lock);
384 zdev->request_count--;
385 __zcrypt_increase_preference(zdev);
386 put_device(&zdev->ap_dev->device);
387 zcrypt_device_put(zdev);
388 spin_unlock_bh(&zcrypt_device_lock);
389 return rc;
390 }
391 spin_unlock_bh(&zcrypt_device_lock);
392 return -ENODEV;
393}
394
395static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
396{
397 struct zcrypt_device *zdev;
398 int rc;
399
400 spin_lock_bh(&zcrypt_device_lock);
401 list_for_each_entry(zdev, &zcrypt_device_list, list) {
402 if (!zdev->online || !zdev->ops->send_cprb ||
403 (xcRB->user_defined != AUTOSELECT &&
404 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
405 )
406 continue;
407 zcrypt_device_get(zdev);
408 get_device(&zdev->ap_dev->device);
409 zdev->request_count++;
410 __zcrypt_decrease_preference(zdev);
411 spin_unlock_bh(&zcrypt_device_lock);
412 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
413 rc = zdev->ops->send_cprb(zdev, xcRB);
414 module_put(zdev->ap_dev->drv->driver.owner);
415 }
416 else
417 rc = -EAGAIN;
418 spin_lock_bh(&zcrypt_device_lock);
419 zdev->request_count--;
420 __zcrypt_increase_preference(zdev);
421 put_device(&zdev->ap_dev->device);
422 zcrypt_device_put(zdev);
423 spin_unlock_bh(&zcrypt_device_lock);
424 return rc;
425 }
426 spin_unlock_bh(&zcrypt_device_lock);
427 return -ENODEV;
428}
429
430static void zcrypt_status_mask(char status[AP_DEVICES])
431{
432 struct zcrypt_device *zdev;
433
434 memset(status, 0, sizeof(char) * AP_DEVICES);
435 spin_lock_bh(&zcrypt_device_lock);
436 list_for_each_entry(zdev, &zcrypt_device_list, list)
437 status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
438 zdev->online ? zdev->user_space_type : 0x0d;
439 spin_unlock_bh(&zcrypt_device_lock);
440}
441
442static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
443{
444 struct zcrypt_device *zdev;
445
446 memset(qdepth, 0, sizeof(char) * AP_DEVICES);
447 spin_lock_bh(&zcrypt_device_lock);
448 list_for_each_entry(zdev, &zcrypt_device_list, list) {
449 spin_lock(&zdev->ap_dev->lock);
450 qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
451 zdev->ap_dev->pendingq_count +
452 zdev->ap_dev->requestq_count;
453 spin_unlock(&zdev->ap_dev->lock);
454 }
455 spin_unlock_bh(&zcrypt_device_lock);
456}
457
458static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
459{
460 struct zcrypt_device *zdev;
461
462 memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
463 spin_lock_bh(&zcrypt_device_lock);
464 list_for_each_entry(zdev, &zcrypt_device_list, list) {
465 spin_lock(&zdev->ap_dev->lock);
466 reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
467 zdev->ap_dev->total_request_count;
468 spin_unlock(&zdev->ap_dev->lock);
469 }
470 spin_unlock_bh(&zcrypt_device_lock);
471}
472
473static int zcrypt_pendingq_count(void)
474{
475 struct zcrypt_device *zdev;
476 int pendingq_count = 0;
477
478 spin_lock_bh(&zcrypt_device_lock);
479 list_for_each_entry(zdev, &zcrypt_device_list, list) {
480 spin_lock(&zdev->ap_dev->lock);
481 pendingq_count += zdev->ap_dev->pendingq_count;
482 spin_unlock(&zdev->ap_dev->lock);
483 }
484 spin_unlock_bh(&zcrypt_device_lock);
485 return pendingq_count;
486}
487
488static int zcrypt_requestq_count(void)
489{
490 struct zcrypt_device *zdev;
491 int requestq_count = 0;
492
493 spin_lock_bh(&zcrypt_device_lock);
494 list_for_each_entry(zdev, &zcrypt_device_list, list) {
495 spin_lock(&zdev->ap_dev->lock);
496 requestq_count += zdev->ap_dev->requestq_count;
497 spin_unlock(&zdev->ap_dev->lock);
498 }
499 spin_unlock_bh(&zcrypt_device_lock);
500 return requestq_count;
501}
502
503static int zcrypt_count_type(int type)
504{
505 struct zcrypt_device *zdev;
506 int device_count = 0;
507
508 spin_lock_bh(&zcrypt_device_lock);
509 list_for_each_entry(zdev, &zcrypt_device_list, list)
510 if (zdev->user_space_type == type)
511 device_count++;
512 spin_unlock_bh(&zcrypt_device_lock);
513 return device_count;
514}
515
516/**
517 * Old, deprecated combi status call.
518 */
519static long zcrypt_ica_status(struct file *filp, unsigned long arg)
520{
521 struct ica_z90_status *pstat;
522 int ret;
523
524 pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
525 if (!pstat)
526 return -ENOMEM;
527 pstat->totalcount = zcrypt_device_count;
528 pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
529 pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
530 pstat->requestqWaitCount = zcrypt_requestq_count();
531 pstat->pendingqWaitCount = zcrypt_pendingq_count();
532 pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
533 pstat->cryptoDomain = ap_domain_index;
534 zcrypt_status_mask(pstat->status);
535 zcrypt_qdepth_mask(pstat->qdepth);
536 ret = 0;
537 if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
538 ret = -EFAULT;
539 kfree(pstat);
540 return ret;
541}
542
543static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
544 unsigned long arg)
545{
546 int rc;
547
548 switch (cmd) {
549 case ICARSAMODEXPO: {
550 struct ica_rsa_modexpo __user *umex = (void __user *) arg;
551 struct ica_rsa_modexpo mex;
552 if (copy_from_user(&mex, umex, sizeof(mex)))
553 return -EFAULT;
554 do {
555 rc = zcrypt_rsa_modexpo(&mex);
556 } while (rc == -EAGAIN);
557 if (rc)
558 return rc;
559 return put_user(mex.outputdatalength, &umex->outputdatalength);
560 }
561 case ICARSACRT: {
562 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
563 struct ica_rsa_modexpo_crt crt;
564 if (copy_from_user(&crt, ucrt, sizeof(crt)))
565 return -EFAULT;
566 do {
567 rc = zcrypt_rsa_crt(&crt);
568 } while (rc == -EAGAIN);
569 if (rc)
570 return rc;
571 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
572 }
573 case ZSECSENDCPRB: {
574 struct ica_xcRB __user *uxcRB = (void __user *) arg;
575 struct ica_xcRB xcRB;
576 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
577 return -EFAULT;
578 do {
579 rc = zcrypt_send_cprb(&xcRB);
580 } while (rc == -EAGAIN);
581 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
582 return -EFAULT;
583 return rc;
584 }
585 case Z90STAT_STATUS_MASK: {
586 char status[AP_DEVICES];
587 zcrypt_status_mask(status);
588 if (copy_to_user((char __user *) arg, status,
589 sizeof(char) * AP_DEVICES))
590 return -EFAULT;
591 return 0;
592 }
593 case Z90STAT_QDEPTH_MASK: {
594 char qdepth[AP_DEVICES];
595 zcrypt_qdepth_mask(qdepth);
596 if (copy_to_user((char __user *) arg, qdepth,
597 sizeof(char) * AP_DEVICES))
598 return -EFAULT;
599 return 0;
600 }
601 case Z90STAT_PERDEV_REQCNT: {
602 int reqcnt[AP_DEVICES];
603 zcrypt_perdev_reqcnt(reqcnt);
604 if (copy_to_user((int __user *) arg, reqcnt,
605 sizeof(int) * AP_DEVICES))
606 return -EFAULT;
607 return 0;
608 }
609 case Z90STAT_REQUESTQ_COUNT:
610 return put_user(zcrypt_requestq_count(), (int __user *) arg);
611 case Z90STAT_PENDINGQ_COUNT:
612 return put_user(zcrypt_pendingq_count(), (int __user *) arg);
613 case Z90STAT_TOTALOPEN_COUNT:
614 return put_user(atomic_read(&zcrypt_open_count),
615 (int __user *) arg);
616 case Z90STAT_DOMAIN_INDEX:
617 return put_user(ap_domain_index, (int __user *) arg);
618 /**
619 * Deprecated ioctls. Don't add another device count ioctl;
620 * user space can derive the individual counts from the
621 * output of the Z90STAT_STATUS_MASK ioctl.
622 */
623 case ICAZ90STATUS:
624 return zcrypt_ica_status(filp, arg);
625 case Z90STAT_TOTALCOUNT:
626 return put_user(zcrypt_device_count, (int __user *) arg);
627 case Z90STAT_PCICACOUNT:
628 return put_user(zcrypt_count_type(ZCRYPT_PCICA),
629 (int __user *) arg);
630 case Z90STAT_PCICCCOUNT:
631 return put_user(zcrypt_count_type(ZCRYPT_PCICC),
632 (int __user *) arg);
633 case Z90STAT_PCIXCCMCL2COUNT:
634 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
635 (int __user *) arg);
636 case Z90STAT_PCIXCCMCL3COUNT:
637 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
638 (int __user *) arg);
639 case Z90STAT_PCIXCCCOUNT:
640 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
641 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
642 (int __user *) arg);
643 case Z90STAT_CEX2CCOUNT:
644 return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
645 (int __user *) arg);
646 case Z90STAT_CEX2ACOUNT:
647 return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
648 (int __user *) arg);
649 default:
650 /* unknown ioctl number */
651 return -ENOIOCTLCMD;
652 }
653}
654
655#ifdef CONFIG_COMPAT
656/**
657 * ioctl32 conversion routines
658 */
659struct compat_ica_rsa_modexpo {
660 compat_uptr_t inputdata;
661 unsigned int inputdatalength;
662 compat_uptr_t outputdata;
663 unsigned int outputdatalength;
664 compat_uptr_t b_key;
665 compat_uptr_t n_modulus;
666};
667
668static long trans_modexpo32(struct file *filp, unsigned int cmd,
669 unsigned long arg)
670{
671 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
672 struct compat_ica_rsa_modexpo mex32;
673 struct ica_rsa_modexpo mex64;
674 long rc;
675
676 if (copy_from_user(&mex32, umex32, sizeof(mex32)))
677 return -EFAULT;
678 mex64.inputdata = compat_ptr(mex32.inputdata);
679 mex64.inputdatalength = mex32.inputdatalength;
680 mex64.outputdata = compat_ptr(mex32.outputdata);
681 mex64.outputdatalength = mex32.outputdatalength;
682 mex64.b_key = compat_ptr(mex32.b_key);
683 mex64.n_modulus = compat_ptr(mex32.n_modulus);
684 do {
685 rc = zcrypt_rsa_modexpo(&mex64);
686 } while (rc == -EAGAIN);
687 if (!rc)
688 rc = put_user(mex64.outputdatalength,
689 &umex32->outputdatalength);
690 return rc;
691}
692
693struct compat_ica_rsa_modexpo_crt {
694 compat_uptr_t inputdata;
695 unsigned int inputdatalength;
696 compat_uptr_t outputdata;
697 unsigned int outputdatalength;
698 compat_uptr_t bp_key;
699 compat_uptr_t bq_key;
700 compat_uptr_t np_prime;
701 compat_uptr_t nq_prime;
702 compat_uptr_t u_mult_inv;
703};
704
705static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
706 unsigned long arg)
707{
708 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
709 struct compat_ica_rsa_modexpo_crt crt32;
710 struct ica_rsa_modexpo_crt crt64;
711 long rc;
712
713 if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
714 return -EFAULT;
715 crt64.inputdata = compat_ptr(crt32.inputdata);
716 crt64.inputdatalength = crt32.inputdatalength;
717	crt64.outputdata = compat_ptr(crt32.outputdata);
718 crt64.outputdatalength = crt32.outputdatalength;
719 crt64.bp_key = compat_ptr(crt32.bp_key);
720 crt64.bq_key = compat_ptr(crt32.bq_key);
721 crt64.np_prime = compat_ptr(crt32.np_prime);
722 crt64.nq_prime = compat_ptr(crt32.nq_prime);
723 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
724 do {
725 rc = zcrypt_rsa_crt(&crt64);
726 } while (rc == -EAGAIN);
727 if (!rc)
728 rc = put_user(crt64.outputdatalength,
729 &ucrt32->outputdatalength);
730 return rc;
731}
732
733struct compat_ica_xcRB {
734 unsigned short agent_ID;
735 unsigned int user_defined;
736 unsigned short request_ID;
737 unsigned int request_control_blk_length;
738 unsigned char padding1[16 - sizeof (compat_uptr_t)];
739 compat_uptr_t request_control_blk_addr;
740 unsigned int request_data_length;
741 char padding2[16 - sizeof (compat_uptr_t)];
742 compat_uptr_t request_data_address;
743 unsigned int reply_control_blk_length;
744 char padding3[16 - sizeof (compat_uptr_t)];
745 compat_uptr_t reply_control_blk_addr;
746 unsigned int reply_data_length;
747 char padding4[16 - sizeof (compat_uptr_t)];
748 compat_uptr_t reply_data_addr;
749 unsigned short priority_window;
750 unsigned int status;
751} __attribute__((packed));
752
753static long trans_xcRB32(struct file *filp, unsigned int cmd,
754 unsigned long arg)
755{
756 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
757 struct compat_ica_xcRB xcRB32;
758 struct ica_xcRB xcRB64;
759 long rc;
760
761 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
762 return -EFAULT;
763 xcRB64.agent_ID = xcRB32.agent_ID;
764 xcRB64.user_defined = xcRB32.user_defined;
765 xcRB64.request_ID = xcRB32.request_ID;
766 xcRB64.request_control_blk_length =
767 xcRB32.request_control_blk_length;
768 xcRB64.request_control_blk_addr =
769 compat_ptr(xcRB32.request_control_blk_addr);
770 xcRB64.request_data_length =
771 xcRB32.request_data_length;
772 xcRB64.request_data_address =
773 compat_ptr(xcRB32.request_data_address);
774 xcRB64.reply_control_blk_length =
775 xcRB32.reply_control_blk_length;
776 xcRB64.reply_control_blk_addr =
777 compat_ptr(xcRB32.reply_control_blk_addr);
778 xcRB64.reply_data_length = xcRB32.reply_data_length;
779 xcRB64.reply_data_addr =
780 compat_ptr(xcRB32.reply_data_addr);
781 xcRB64.priority_window = xcRB32.priority_window;
782 xcRB64.status = xcRB32.status;
783 do {
784 rc = zcrypt_send_cprb(&xcRB64);
785 } while (rc == -EAGAIN);
786 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
787 xcRB32.reply_data_length = xcRB64.reply_data_length;
788 xcRB32.status = xcRB64.status;
789 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
790 return -EFAULT;
791 return rc;
792}
793
794long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
795 unsigned long arg)
796{
797 if (cmd == ICARSAMODEXPO)
798 return trans_modexpo32(filp, cmd, arg);
799 if (cmd == ICARSACRT)
800 return trans_modexpo_crt32(filp, cmd, arg);
801 if (cmd == ZSECSENDCPRB)
802 return trans_xcRB32(filp, cmd, arg);
803 return zcrypt_unlocked_ioctl(filp, cmd, arg);
804}
805#endif
806
807/**
808 * Misc device file operations.
809 */
810static struct file_operations zcrypt_fops = {
811 .owner = THIS_MODULE,
812 .read = zcrypt_read,
813 .write = zcrypt_write,
814 .unlocked_ioctl = zcrypt_unlocked_ioctl,
815#ifdef CONFIG_COMPAT
816 .compat_ioctl = zcrypt_compat_ioctl,
817#endif
818 .open = zcrypt_open,
819 .release = zcrypt_release
820};
821
822/**
823 * Misc device.
824 */
825static struct miscdevice zcrypt_misc_device = {
826 .minor = MISC_DYNAMIC_MINOR,
827 .name = "z90crypt",
828 .fops = &zcrypt_fops,
829};
830
831/**
832 * Deprecated /proc entry support.
833 */
834static struct proc_dir_entry *zcrypt_entry;
835
836static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
837 unsigned int len)
838{
839 int hl, i;
840
841 hl = 0;
842 for (i = 0; i < len; i++)
843 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
844 hl += sprintf(outaddr+hl, " ");
845 return hl;
846}
847
848static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
849 unsigned int len)
850{
851 int hl, inl, c, cx;
852
853 hl = sprintf(outaddr, " ");
854 inl = 0;
855 for (c = 0; c < (len / 16); c++) {
856 hl += sprintcl(outaddr+hl, addr+inl, 16);
857 inl += 16;
858 }
859 cx = len%16;
860 if (cx) {
861 hl += sprintcl(outaddr+hl, addr+inl, cx);
862 inl += cx;
863 }
864 hl += sprintf(outaddr+hl, "\n");
865 return hl;
866}
867
868static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
869 unsigned char *addr, unsigned int len)
870{
871 int hl, inl, r, rx;
872
873 hl = sprintf(outaddr, "\n%s\n", title);
874 inl = 0;
875 for (r = 0; r < (len / 64); r++) {
876 hl += sprintrw(outaddr+hl, addr+inl, 64);
877 inl += 64;
878 }
879 rx = len % 64;
880 if (rx) {
881 hl += sprintrw(outaddr+hl, addr+inl, rx);
882 inl += rx;
883 }
884 hl += sprintf(outaddr+hl, "\n");
885 return hl;
886}
887
888static inline int sprinthx4(unsigned char *title, unsigned char *outaddr,
889 unsigned int *array, unsigned int len)
890{
891 int hl, r;
892
893 hl = sprintf(outaddr, "\n%s\n", title);
894 for (r = 0; r < len; r++) {
895 if ((r % 8) == 0)
896 hl += sprintf(outaddr+hl, " ");
897 hl += sprintf(outaddr+hl, "%08X ", array[r]);
898 if ((r % 8) == 7)
899 hl += sprintf(outaddr+hl, "\n");
900 }
901 hl += sprintf(outaddr+hl, "\n");
902 return hl;
903}
904
905static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
906 int count, int *eof, void *data)
907{
908 unsigned char *workarea;
909 int len;
910
911 len = 0;
912
913 /* resp_buff is a page. Use the right half for a work area */
914 workarea = resp_buff + 2000;
915 len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n",
916 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
917 len += sprintf(resp_buff + len, "Cryptographic domain: %d\n",
918 ap_domain_index);
919 len += sprintf(resp_buff + len, "Total device count: %d\n",
920 zcrypt_device_count);
921 len += sprintf(resp_buff + len, "PCICA count: %d\n",
922 zcrypt_count_type(ZCRYPT_PCICA));
923 len += sprintf(resp_buff + len, "PCICC count: %d\n",
924 zcrypt_count_type(ZCRYPT_PCICC));
925 len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n",
926 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
927 len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n",
928 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
929 len += sprintf(resp_buff + len, "CEX2C count: %d\n",
930 zcrypt_count_type(ZCRYPT_CEX2C));
931 len += sprintf(resp_buff + len, "CEX2A count: %d\n",
932 zcrypt_count_type(ZCRYPT_CEX2A));
933 len += sprintf(resp_buff + len, "requestq count: %d\n",
934 zcrypt_requestq_count());
935 len += sprintf(resp_buff + len, "pendingq count: %d\n",
936 zcrypt_pendingq_count());
937 len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
938 atomic_read(&zcrypt_open_count));
939 zcrypt_status_mask(workarea);
940 len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
941 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
942 resp_buff+len, workarea, AP_DEVICES);
943 zcrypt_qdepth_mask(workarea);
944 len += sprinthx("Waiting work element counts",
945 resp_buff+len, workarea, AP_DEVICES);
946 zcrypt_perdev_reqcnt((unsigned int *) workarea);
947 len += sprinthx4("Per-device successfully completed request counts",
948		       resp_buff+len, (unsigned int *) workarea, AP_DEVICES);
949 *eof = 1;
950 memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int));
951 return len;
952}
953
954static void zcrypt_disable_card(int index)
955{
956 struct zcrypt_device *zdev;
957
958 spin_lock_bh(&zcrypt_device_lock);
959 list_for_each_entry(zdev, &zcrypt_device_list, list)
960 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
961 zdev->online = 0;
962 ap_flush_queue(zdev->ap_dev);
963 break;
964 }
965 spin_unlock_bh(&zcrypt_device_lock);
966}
967
968static void zcrypt_enable_card(int index)
969{
970 struct zcrypt_device *zdev;
971
972 spin_lock_bh(&zcrypt_device_lock);
973 list_for_each_entry(zdev, &zcrypt_device_list, list)
974 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
975 zdev->online = 1;
976 break;
977 }
978 spin_unlock_bh(&zcrypt_device_lock);
979}
980
981static int zcrypt_status_write(struct file *file, const char __user *buffer,
982 unsigned long count, void *data)
983{
984 unsigned char *lbuf, *ptr;
985 unsigned long local_count;
986 int j;
987
988 if (count <= 0)
989 return 0;
990
991#define LBUFSIZE 1200UL
992 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
993 if (!lbuf) {
994 PRINTK("kmalloc failed!\n");
995 return 0;
996 }
997
998 local_count = min(LBUFSIZE - 1, count);
999 if (copy_from_user(lbuf, buffer, local_count) != 0) {
1000 kfree(lbuf);
1001 return -EFAULT;
1002 }
1003 lbuf[local_count] = '\0';
1004
1005 ptr = strstr(lbuf, "Online devices");
1006 if (!ptr) {
1007 PRINTK("Unable to parse data (missing \"Online devices\")\n");
1008 goto out;
1009 }
1010 ptr = strstr(ptr, "\n");
1011 if (!ptr) {
1012 PRINTK("Unable to parse data (missing newline "
1013 "after \"Online devices\")\n");
1014 goto out;
1015 }
1016 ptr++;
1017
1018 if (strstr(ptr, "Waiting work element counts") == NULL) {
1019 PRINTK("Unable to parse data (missing "
1020 "\"Waiting work element counts\")\n");
1021 goto out;
1022 }
1023
1024 for (j = 0; j < 64 && *ptr; ptr++) {
1025 /**
1026 * '0' for no device, '1' for PCICA, '2' for PCICC,
1027 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1028		 * '5' for CEX2C and '6' for CEX2A
1029 */
1030 if (*ptr >= '0' && *ptr <= '6')
1031 j++;
1032 else if (*ptr == 'd' || *ptr == 'D')
1033 zcrypt_disable_card(j++);
1034 else if (*ptr == 'e' || *ptr == 'E')
1035 zcrypt_enable_card(j++);
1036 else if (*ptr != ' ' && *ptr != '\t')
1037 break;
1038 }
1039out:
1040 kfree(lbuf);
1041 return count;
1042}
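/*
 * Usage sketch (assumed, derived from the parser above): read
 * /proc/driver/z90crypt, find the line that follows the
 * "Online devices" header, replace the digit of the card to be
 * toggled with 'd'/'D' (disable) or 'e'/'E' (enable), and write the
 * whole buffer back. The written text must still contain the
 * "Waiting work element counts" header or the write is ignored.
 */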
1043
1044/**
1045 * The module initialization code.
1046 */
1047int __init zcrypt_api_init(void)
1048{
1049 int rc;
1050
1051 /* Register the request sprayer. */
1052 rc = misc_register(&zcrypt_misc_device);
1053 if (rc < 0) {
1054 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
1055 zcrypt_misc_device.minor, rc);
1056 goto out;
1057 }
1058
1059 /* Set up the proc file system */
1060 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
1061 if (!zcrypt_entry) {
1062 PRINTK("Couldn't create z90crypt proc entry\n");
1063 rc = -ENOMEM;
1064 goto out_misc;
1065 }
1066 zcrypt_entry->nlink = 1;
1067 zcrypt_entry->data = NULL;
1068 zcrypt_entry->read_proc = zcrypt_status_read;
1069 zcrypt_entry->write_proc = zcrypt_status_write;
1070
1071 return 0;
1072
1073out_misc:
1074 misc_deregister(&zcrypt_misc_device);
1075out:
1076 return rc;
1077}
1078
1079/**
1080 * The module termination code.
1081 */
1082void zcrypt_api_exit(void)
1083{
1084 remove_proc_entry("driver/z90crypt", NULL);
1085 misc_deregister(&zcrypt_misc_device);
1086}
1087
1088#ifndef CONFIG_ZCRYPT_MONOLITHIC
1089module_init(zcrypt_api_init);
1090module_exit(zcrypt_api_exit);
1091#endif
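The following is a minimal user-space sketch (not part of this patch) of driving the z90crypt misc device registered above through the ICARSAMODEXPO ioctl. The 1024-bit buffer sizes and the all-zero key material are placeholders, and it assumes a user-space copy of the <asm/zcrypt.h> definitions (struct ica_rsa_modexpo and ICARSAMODEXPO) is available.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>		/* struct ica_rsa_modexpo, ICARSAMODEXPO */

int main(void)
{
	char n[128], e[128], in[128], out[128];	/* placeholder 1024-bit operands */
	struct ica_rsa_modexpo mex;
	int fd, rc;

	memset(&mex, 0, sizeof(mex));
	mex.inputdata = in;
	mex.inputdatalength = sizeof(in);
	mex.outputdata = out;
	mex.outputdatalength = sizeof(out);	/* must be >= inputdatalength */
	mex.b_key = e;
	mex.n_modulus = n;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0) {
		perror("open /dev/z90crypt");
		return 1;
	}
	rc = ioctl(fd, ICARSAMODEXPO, &mex);
	if (rc < 0)
		perror("ICARSAMODEXPO");
	else
		printf("modexpo result: %u bytes\n", mex.outputdatalength);
	close(fd);
	return rc < 0;
}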
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
new file mode 100644
index 000000000000..de4877ee618f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -0,0 +1,141 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_api.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 * Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
13 * Ralph Wuerthner <rwuerthn@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#ifndef _ZCRYPT_API_H_
31#define _ZCRYPT_API_H_
32
33/**
34 * Macro definitions
35 *
36 * PDEBUG debugs in the form "zcrypt: function_name -> message"
37 *
38 * PRINTK is like PDEBUG, except that it is always enabled
39 * PRINTKN is like PRINTK, except that it does not include the function name
40 * PRINTKW is like PRINTK, except that it uses KERN_WARNING
41 * PRINTKC is like PRINTK, except that it uses KERN_CRIT
42 */
43#define DEV_NAME "zcrypt"
44
45#define PRINTK(fmt, args...) \
46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
47#define PRINTKN(fmt, args...) \
48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
49#define PRINTKW(fmt, args...) \
50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
51#define PRINTKC(fmt, args...) \
52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
53
54#ifdef ZCRYPT_DEBUG
55#define PDEBUG(fmt, args...) \
56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
57#else
58#define PDEBUG(fmt, args...) do {} while (0)
59#endif
60
61#include "ap_bus.h"
62#include <asm/zcrypt.h>
63
64/* deprecated status calls */
65#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
66#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
67
68/**
69 * This structure is deprecated and the corresponding ioctl() has been
70 * replaced with individual ioctl()s for each piece of data!
71 */
72struct ica_z90_status {
73 int totalcount;
74 int leedslitecount; // PCICA
75 int leeds2count; // PCICC
76 // int PCIXCCCount; is not in struct for backward compatibility
77 int requestqWaitCount;
78 int pendingqWaitCount;
79 int totalOpenCount;
80 int cryptoDomain;
81 // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
82 // 5=CEX2C
83 unsigned char status[64];
84 // qdepth: # work elements waiting for each device
85 unsigned char qdepth[64];
86};
87
88/**
89 * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
90 * PCIXCC_MCL3, CEX2C, or CEX2A
91 *
92 * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed
93 * Internal Code (LIC) (EC J12220 level 29).
94 * PCIXCC_MCL2 refers to any LIC before this level.
95 */
96#define ZCRYPT_PCICA 1
97#define ZCRYPT_PCICC 2
98#define ZCRYPT_PCIXCC_MCL2 3
99#define ZCRYPT_PCIXCC_MCL3 4
100#define ZCRYPT_CEX2C 5
101#define ZCRYPT_CEX2A 6
102
103struct zcrypt_device;
104
105struct zcrypt_ops {
106 long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
107 long (*rsa_modexpo_crt)(struct zcrypt_device *,
108 struct ica_rsa_modexpo_crt *);
109 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
110};
111
112struct zcrypt_device {
113 struct list_head list; /* Device list. */
114 spinlock_t lock; /* Per device lock. */
115 struct kref refcount; /* device refcounting */
116 struct ap_device *ap_dev; /* The "real" ap device. */
117 struct zcrypt_ops *ops; /* Crypto operations. */
118 int online; /* User online/offline */
119
120 int user_space_type; /* User space device id. */
121 char *type_string; /* User space device name. */
122	int min_mod_size;		/* Min number of bytes. */
123	int max_mod_size;		/* Max number of bytes. */
124 int short_crt; /* Card has crt length restriction. */
125 int speed_rating; /* Speed of the crypto device. */
126
127 int request_count; /* # current requests. */
128
129 struct ap_message reply; /* Per-device reply structure. */
130};
131
132struct zcrypt_device *zcrypt_device_alloc(size_t);
133void zcrypt_device_free(struct zcrypt_device *);
134void zcrypt_device_get(struct zcrypt_device *);
135int zcrypt_device_put(struct zcrypt_device *);
136int zcrypt_device_register(struct zcrypt_device *);
137void zcrypt_device_unregister(struct zcrypt_device *);
138int zcrypt_api_init(void);
139void zcrypt_api_exit(void);
140
141#endif /* _ZCRYPT_API_H_ */
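As a hedged illustration of how a card driver consumes the interface above, a hypothetical AP bus probe routine would allocate, describe and register a zcrypt_device roughly as follows. The names prefixed zcrypt_demo_ and DEMO_ are invented for this sketch and do not appear in the patch; the real drivers (e.g. zcrypt_cex2a.c below) follow the same pattern with their own constants.

/* Sketch only: assumes #include "ap_bus.h" and "zcrypt_api.h" as in the drivers above. */
#define DEMO_MAX_RESPONSE_SIZE 0x110	/* invented for this sketch */

static struct zcrypt_ops zcrypt_demo_ops;	/* rsa_modexpo etc. filled in by the driver */

static int zcrypt_demo_probe(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev;
	int rc;

	zdev = zcrypt_device_alloc(DEMO_MAX_RESPONSE_SIZE);
	if (!zdev)
		return -ENOMEM;
	zdev->ap_dev = ap_dev;
	zdev->ops = &zcrypt_demo_ops;
	zdev->user_space_type = ZCRYPT_CEX2A;	/* reuse an existing id for the sketch */
	zdev->type_string = "demo";
	zdev->min_mod_size = 1;			/* modulus sizes in bytes */
	zdev->max_mod_size = 256;
	zdev->speed_rating = 1000;
	ap_dev->private = zdev;			/* same wiring as zcrypt_type_show() expects */
	rc = zcrypt_device_register(zdev);
	if (rc) {
		ap_dev->private = NULL;
		zcrypt_device_free(zdev);
	}
	return rc;
}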
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
new file mode 100644
index 000000000000..8dbcf0eef3e5
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -0,0 +1,350 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cca_key.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_CCA_KEY_H_
29#define _ZCRYPT_CCA_KEY_H_
30
31struct T6_keyBlock_hdr {
32 unsigned short blen;
33 unsigned short ulen;
34 unsigned short flags;
35};
36
37/**
38 * mapping for the cca private ME key token.
39 * Three parts of interest here: the header, the private section and
40 * the public section.
41 *
42 * mapping for the cca key token header
43 */
44struct cca_token_hdr {
45 unsigned char token_identifier;
46 unsigned char version;
47 unsigned short token_length;
48 unsigned char reserved[4];
49} __attribute__((packed));
50
51#define CCA_TKN_HDR_ID_EXT 0x1E
52
53/**
54 * mapping for the cca private ME section
55 */
56struct cca_private_ext_ME_sec {
57 unsigned char section_identifier;
58 unsigned char version;
59 unsigned short section_length;
60 unsigned char private_key_hash[20];
61 unsigned char reserved1[4];
62 unsigned char key_format;
63 unsigned char reserved2;
64 unsigned char key_name_hash[20];
65 unsigned char key_use_flags[4];
66 unsigned char reserved3[6];
67 unsigned char reserved4[24];
68 unsigned char confounder[24];
69 unsigned char exponent[128];
70 unsigned char modulus[128];
71} __attribute__((packed));
72
73#define CCA_PVT_USAGE_ALL 0x80
74
75/**
76 * mapping for the cca public section
77 * In a private key, the modulus doesn't appear in the public
78 * section. So, an arbitrary public exponent of 0x010001 will be
79 * used and the section length is then always 0x0F.
80 */
81struct cca_public_sec {
82 unsigned char section_identifier;
83 unsigned char version;
84 unsigned short section_length;
85 unsigned char reserved[2];
86 unsigned short exponent_len;
87 unsigned short modulus_bit_len;
88 unsigned short modulus_byte_len; /* In a private key, this is 0 */
89} __attribute__((packed));
90
91/**
92 * mapping for the cca private CRT key 'token'
93 * The first three parts (the only parts considered in this release)
94 * are: the header, the private section and the public section.
95 * The header and public section are the same as for the
96 * struct cca_private_ext_ME
97 *
98 * Following the structure are the quantities p, q, dp, dq, u, pad,
99 * and modulus, in that order, where pad_len is the modulo 8
100 * complement of the residue modulo 8 of the sum of
101 * (p_len + q_len + dp_len + dq_len + u_len).
102 */
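/*
 * Worked example (illustration only): for inputdatalength = 100 the
 * key builder below uses short_len = 50 and long_len = 58, so
 * p_len + q_len + dp_len + dq_len + u_len = 3*58 + 2*50 = 274;
 * 274 mod 8 = 2, hence pad_len = 6 and the key parts plus pad end on
 * an 8-byte boundary before the modulus.
 */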
103struct cca_pvt_ext_CRT_sec {
104 unsigned char section_identifier;
105 unsigned char version;
106 unsigned short section_length;
107 unsigned char private_key_hash[20];
108 unsigned char reserved1[4];
109 unsigned char key_format;
110 unsigned char reserved2;
111 unsigned char key_name_hash[20];
112 unsigned char key_use_flags[4];
113 unsigned short p_len;
114 unsigned short q_len;
115 unsigned short dp_len;
116 unsigned short dq_len;
117 unsigned short u_len;
118 unsigned short mod_len;
119 unsigned char reserved3[4];
120 unsigned short pad_len;
121 unsigned char reserved4[52];
122 unsigned char confounder[8];
123} __attribute__((packed));
124
125#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
126#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
127
128/**
129 * Set up private key fields of a type6 MEX message.
130 * Note that all numerics in the key token are big-endian,
131 * while the entries in the key block header are little-endian.
132 *
133 * @mex: pointer to user input data
134 * @p: pointer to memory area for the key
135 *
136 * Returns the size of the key area or -EFAULT
137 */
138static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
139 void *p, int big_endian)
140{
141 static struct cca_token_hdr static_pvt_me_hdr = {
142 .token_identifier = 0x1E,
143 .token_length = 0x0183,
144 };
145 static struct cca_private_ext_ME_sec static_pvt_me_sec = {
146 .section_identifier = 0x02,
147 .section_length = 0x016C,
148 .key_use_flags = {0x80,0x00,0x00,0x00},
149 };
150 static struct cca_public_sec static_pub_me_sec = {
151 .section_identifier = 0x04,
152 .section_length = 0x000F,
153 .exponent_len = 0x0003,
154 };
155 static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
156 struct {
157 struct T6_keyBlock_hdr t6_hdr;
158 struct cca_token_hdr pvtMeHdr;
159 struct cca_private_ext_ME_sec pvtMeSec;
160 struct cca_public_sec pubMeSec;
161 char exponent[3];
162 } __attribute__((packed)) *key = p;
163 unsigned char *temp;
164
165 memset(key, 0, sizeof(*key));
166
167 if (big_endian) {
168 key->t6_hdr.blen = cpu_to_be16(0x189);
169 key->t6_hdr.ulen = cpu_to_be16(0x189 - 2);
170 } else {
171 key->t6_hdr.blen = cpu_to_le16(0x189);
172 key->t6_hdr.ulen = cpu_to_le16(0x189 - 2);
173 }
174 key->pvtMeHdr = static_pvt_me_hdr;
175 key->pvtMeSec = static_pvt_me_sec;
176 key->pubMeSec = static_pub_me_sec;
177 /**
178 * In a private key, the modulus doesn't appear in the public
179 * section. So, an arbitrary public exponent of 0x010001 will be
180 * used.
181 */
182 memcpy(key->exponent, pk_exponent, 3);
183
184 /* key parameter block */
185 temp = key->pvtMeSec.exponent +
186 sizeof(key->pvtMeSec.exponent) - mex->inputdatalength;
187 if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
188 return -EFAULT;
189
190 /* modulus */
191 temp = key->pvtMeSec.modulus +
192 sizeof(key->pvtMeSec.modulus) - mex->inputdatalength;
193 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
194 return -EFAULT;
195 key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength;
196 return sizeof(*key);
197}
198
199/**
200 * Set up public key fields of a type6 MEX message. Leading
201 * zeroes are stripped from the b_key.
202 * Note that all numerics in the key token are big-endian,
203 * while the entries in the key block header are little-endian.
204 *
205 * @mex: pointer to user input data
206 * @p: pointer to memory area for the key
207 *
208 * Returns the size of the key area or -EFAULT
209 */
210static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
211 void *p, int big_endian)
212{
213 static struct cca_token_hdr static_pub_hdr = {
214 .token_identifier = 0x1E,
215 };
216 static struct cca_public_sec static_pub_sec = {
217 .section_identifier = 0x04,
218 };
219 struct {
220 struct T6_keyBlock_hdr t6_hdr;
221 struct cca_token_hdr pubHdr;
222 struct cca_public_sec pubSec;
223 char exponent[0];
224 } __attribute__((packed)) *key = p;
225 unsigned char *temp;
226 int i;
227
228 memset(key, 0, sizeof(*key));
229
230 key->pubHdr = static_pub_hdr;
231 key->pubSec = static_pub_sec;
232
233 /* key parameter block */
234 temp = key->exponent;
235 if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
236 return -EFAULT;
237 /* Strip leading zeroes from b_key. */
238 for (i = 0; i < mex->inputdatalength; i++)
239 if (temp[i])
240 break;
241 if (i >= mex->inputdatalength)
242 return -EINVAL;
243 memmove(temp, temp + i, mex->inputdatalength - i);
244 temp += mex->inputdatalength - i;
245 /* modulus */
246 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
247 return -EFAULT;
248
249 key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
250 key->pubSec.modulus_byte_len = mex->inputdatalength;
251 key->pubSec.exponent_len = mex->inputdatalength - i;
252 key->pubSec.section_length = sizeof(key->pubSec) +
253 2*mex->inputdatalength - i;
254 key->pubHdr.token_length =
255 key->pubSec.section_length + sizeof(key->pubHdr);
256 if (big_endian) {
257 key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4);
258 key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6);
259 } else {
260 key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4);
261 key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6);
262 }
263 return sizeof(*key) + 2*mex->inputdatalength - i;
264}
265
266/**
267 * Set up private key fields of a type6 CRT message.
268 * Note that all numerics in the key token are big-endian,
269 * while the entries in the key block header are little-endian.
270 *
271 * @crt: pointer to user input data
272 * @p: pointer to memory area for the key
273 *
274 * Returns the size of the key area or -EFAULT
275 */
276static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
277 void *p, int big_endian)
278{
279 static struct cca_public_sec static_cca_pub_sec = {
280 .section_identifier = 4,
281 .section_length = 0x000f,
282 .exponent_len = 0x0003,
283 };
284 static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
285 struct {
286 struct T6_keyBlock_hdr t6_hdr;
287 struct cca_token_hdr token;
288 struct cca_pvt_ext_CRT_sec pvt;
289 char key_parts[0];
290 } __attribute__((packed)) *key = p;
291 struct cca_public_sec *pub;
292 int short_len, long_len, pad_len, key_len, size;
293
294 memset(key, 0, sizeof(*key));
295
296 short_len = crt->inputdatalength / 2;
297 long_len = short_len + 8;
298 pad_len = -(3*long_len + 2*short_len) & 7;
299 key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
300 size = sizeof(*key) + key_len + sizeof(*pub) + 3;
301
302 /* parameter block.key block */
303 if (big_endian) {
304 key->t6_hdr.blen = cpu_to_be16(size);
305 key->t6_hdr.ulen = cpu_to_be16(size - 2);
306 } else {
307 key->t6_hdr.blen = cpu_to_le16(size);
308 key->t6_hdr.ulen = cpu_to_le16(size - 2);
309 }
310
311 /* key token header */
312 key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
313 key->token.token_length = size - 6;
314
315 /* private section */
316 key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
317 key->pvt.section_length = sizeof(key->pvt) + key_len;
318 key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
319 key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL;
320 key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len;
321 key->pvt.q_len = key->pvt.dq_len = short_len;
322 key->pvt.mod_len = crt->inputdatalength;
323 key->pvt.pad_len = pad_len;
324
325 /* key parts */
326 if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
327 copy_from_user(key->key_parts + long_len,
328 crt->nq_prime, short_len) ||
329 copy_from_user(key->key_parts + long_len + short_len,
330 crt->bp_key, long_len) ||
331 copy_from_user(key->key_parts + 2*long_len + short_len,
332 crt->bq_key, short_len) ||
333 copy_from_user(key->key_parts + 2*long_len + 2*short_len,
334 crt->u_mult_inv, long_len))
335 return -EFAULT;
336 memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
337 0xff, crt->inputdatalength);
338 pub = (struct cca_public_sec *)(key->key_parts + key_len);
339 *pub = static_cca_pub_sec;
340 pub->modulus_bit_len = 8 * crt->inputdatalength;
341 /**
342 * In a private key, the modulus doesn't appear in the public
343 * section. So, an arbitrary public exponent of 0x010001 will be
344 * used.
345 */
346 memcpy((char *) (pub + 1), pk_exponent, 3);
347 return size;
348}
349
350#endif /* _ZCRYPT_CCA_KEY_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
new file mode 100644
index 000000000000..a62b00083d0c
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -0,0 +1,435 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cex2a.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_cex2a.h"
39
40#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
41#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
42
43#define CEX2A_SPEED_RATING 970
44
45#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
46#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
47
48#define CEX2A_CLEANUP_TIME (15*HZ)
49
50static struct ap_device_id zcrypt_cex2a_ids[] = {
51 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
52 { /* end of list */ },
53};
54
55#ifndef CONFIG_ZCRYPT_MONOLITHIC
56MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
57MODULE_AUTHOR("IBM Corporation");
58MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
59 "Copyright 2001, 2006 IBM Corporation");
60MODULE_LICENSE("GPL");
61#endif
62
63static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
64static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
65static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
66 struct ap_message *);
67
68static struct ap_driver zcrypt_cex2a_driver = {
69 .probe = zcrypt_cex2a_probe,
70 .remove = zcrypt_cex2a_remove,
71 .receive = zcrypt_cex2a_receive,
72 .ids = zcrypt_cex2a_ids,
73};
74
75/**
76 * Convert an ICAMEX message to a type50 MEX message.
77 *
78 * @zdev: crypto device pointer
79 * @ap_msg: crypto request AP message
80 * @mex: pointer to user input data
81 *
82 * Returns 0 on success or -EFAULT.
83 */
84static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
85 struct ap_message *ap_msg,
86 struct ica_rsa_modexpo *mex)
87{
88 unsigned char *mod, *exp, *inp;
89 int mod_len;
90
91 mod_len = mex->inputdatalength;
92
93 if (mod_len <= 128) {
94 struct type50_meb1_msg *meb1 = ap_msg->message;
95 memset(meb1, 0, sizeof(*meb1));
96 ap_msg->length = sizeof(*meb1);
97 meb1->header.msg_type_code = TYPE50_TYPE_CODE;
98 meb1->header.msg_len = sizeof(*meb1);
99 meb1->keyblock_type = TYPE50_MEB1_FMT;
100 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
101 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
102 inp = meb1->message + sizeof(meb1->message) - mod_len;
103 } else {
104 struct type50_meb2_msg *meb2 = ap_msg->message;
105 memset(meb2, 0, sizeof(*meb2));
106 ap_msg->length = sizeof(*meb2);
107 meb2->header.msg_type_code = TYPE50_TYPE_CODE;
108 meb2->header.msg_len = sizeof(*meb2);
109 meb2->keyblock_type = TYPE50_MEB2_FMT;
110 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
111 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
112 inp = meb2->message + sizeof(meb2->message) - mod_len;
113 }
114
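/*
 * mod, exp and inp point at the last mod_len bytes of the zeroed
 * fixed-size fields, so the values copied below end up right-justified
 * and left-padded with zeroes, as the type 50 format requires.
 */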
115 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
116 copy_from_user(exp, mex->b_key, mod_len) ||
117 copy_from_user(inp, mex->inputdata, mod_len))
118 return -EFAULT;
119 return 0;
120}
121
122/**
123 * Convert an ICACRT message to a type50 CRT message.
124 *
125 * @zdev: crypto device pointer
126 * @ap_msg: crypto request AP message
127 * @crt: pointer to user input data
128 *
129 * Returns 0 on success or -EFAULT.
130 */
131static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
132 struct ap_message *ap_msg,
133 struct ica_rsa_modexpo_crt *crt)
134{
135 int mod_len, short_len, long_len, long_offset;
136 unsigned char *p, *q, *dp, *dq, *u, *inp;
137
138 mod_len = crt->inputdatalength;
139 short_len = mod_len / 2;
140 long_len = mod_len / 2 + 8;
141
142 /*
143 * CEX2A cannot handle p, dp, or U > 128 bytes.
144 * If we have one of these, we need to do extra checking.
145 */
146 if (long_len > 128) {
147 /*
148 * zcrypt_rsa_crt already checked for the leading
149 * zeroes of np_prime, bp_key and u_mult_inv.
150 */
151 long_offset = long_len - 128;
152 long_len = 128;
153 } else
154 long_offset = 0;
155
156 /*
157 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
158 * the larger message structure.
159 */
160 if (long_len <= 64) {
161 struct type50_crb1_msg *crb1 = ap_msg->message;
162 memset(crb1, 0, sizeof(*crb1));
163 ap_msg->length = sizeof(*crb1);
164 crb1->header.msg_type_code = TYPE50_TYPE_CODE;
165 crb1->header.msg_len = sizeof(*crb1);
166 crb1->keyblock_type = TYPE50_CRB1_FMT;
167 p = crb1->p + sizeof(crb1->p) - long_len;
168 q = crb1->q + sizeof(crb1->q) - short_len;
169 dp = crb1->dp + sizeof(crb1->dp) - long_len;
170 dq = crb1->dq + sizeof(crb1->dq) - short_len;
171 u = crb1->u + sizeof(crb1->u) - long_len;
172 inp = crb1->message + sizeof(crb1->message) - mod_len;
173 } else {
174 struct type50_crb2_msg *crb2 = ap_msg->message;
175 memset(crb2, 0, sizeof(*crb2));
176 ap_msg->length = sizeof(*crb2);
177 crb2->header.msg_type_code = TYPE50_TYPE_CODE;
178 crb2->header.msg_len = sizeof(*crb2);
179 crb2->keyblock_type = TYPE50_CRB2_FMT;
180 p = crb2->p + sizeof(crb2->p) - long_len;
181 q = crb2->q + sizeof(crb2->q) - short_len;
182 dp = crb2->dp + sizeof(crb2->dp) - long_len;
183 dq = crb2->dq + sizeof(crb2->dq) - short_len;
184 u = crb2->u + sizeof(crb2->u) - long_len;
185 inp = crb2->message + sizeof(crb2->message) - mod_len;
186 }
187
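/*
 * With a non-zero long_offset only the trailing 128 bytes of np_prime,
 * bp_key and u_mult_inv are copied; the skipped leading bytes are known
 * to be zero (see the check above).
 */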
188 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
189 copy_from_user(q, crt->nq_prime, short_len) ||
190 copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
191 copy_from_user(dq, crt->bq_key, short_len) ||
192 copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
193 copy_from_user(inp, crt->inputdata, mod_len))
194 return -EFAULT;
195
196
197 return 0;
198}
199
200/**
201 * Copy results from a type 80 reply message back to user space.
202 *
203 * @zdev: crypto device pointer
204 * @reply: reply AP message.
205 * @outputdata: pointer to user output data
206 * @outputdatalength: size of user output data
207 *
208 * Returns 0 on success or -EFAULT.
209 */
210static int convert_type80(struct zcrypt_device *zdev,
211 struct ap_message *reply,
212 char __user *outputdata,
213 unsigned int outputdatalength)
214{
215 struct type80_hdr *t80h = reply->message;
216 unsigned char *data;
217
218 if (t80h->len < sizeof(*t80h) + outputdatalength) {
219 /* The result is too short, the CEX2A card may not do that. */
220 zdev->online = 0;
221 return -EAGAIN; /* repeat the request on a different device. */
222 }
223 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
224 data = reply->message + t80h->len - outputdatalength;
225 if (copy_to_user(outputdata, data, outputdatalength))
226 return -EFAULT;
227 return 0;
228}
229
230static int convert_response(struct zcrypt_device *zdev,
231 struct ap_message *reply,
232 char __user *outputdata,
233 unsigned int outputdatalength)
234{
235 /* Response type byte is the second byte in the response. */
236 switch (((unsigned char *) reply->message)[1]) {
237 case TYPE82_RSP_CODE:
238 case TYPE88_RSP_CODE:
239 return convert_error(zdev, reply);
240 case TYPE80_RSP_CODE:
241 return convert_type80(zdev, reply,
242 outputdata, outputdatalength);
243 default: /* Unknown response type, this should NEVER EVER happen */
244 PRINTK("Unrecognized Message Header: %08x%08x\n",
245 *(unsigned int *) reply->message,
246 *(unsigned int *) (reply->message+4));
247 zdev->online = 0;
248 return -EAGAIN; /* repeat the request on a different device. */
249 }
250}
251
252/**
253 * This function is called from the AP bus code after a crypto request
254 * "msg" has finished with the reply message "reply".
255 * It is called from tasklet context.
256 * @ap_dev: pointer to the AP device
257 * @msg: pointer to the AP message
258 * @reply: pointer to the AP reply message
259 */
260static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
261 struct ap_message *msg,
262 struct ap_message *reply)
263{
264 static struct error_hdr error_reply = {
265 .type = TYPE82_RSP_CODE,
266 .reply_code = REP82_ERROR_MACHINE_FAILURE,
267 };
268 struct type80_hdr *t80h = reply->message;
269 int length;
270
271 /* Copy the reply message to the request message buffer. */
272 if (IS_ERR(reply))
273 memcpy(msg->message, &error_reply, sizeof(error_reply));
274 else if (t80h->type == TYPE80_RSP_CODE) {
275 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
276 memcpy(msg->message, reply->message, length);
277 } else
278 memcpy(msg->message, reply->message, sizeof error_reply);
279 complete((struct completion *) msg->private);
280}
281
282static atomic_t zcrypt_step = ATOMIC_INIT(0);
283
284/**
285 * The request distributor calls this function if it picked the CEX2A
286 * device to handle a modexpo request.
287 * @zdev: pointer to zcrypt_device structure that identifies the
288 * CEX2A device to the request distributor
289 * @mex: pointer to the modexpo request buffer
290 */
291static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
292 struct ica_rsa_modexpo *mex)
293{
294 struct ap_message ap_msg;
295 struct completion work;
296 int rc;
297
298 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
299 if (!ap_msg.message)
300 return -ENOMEM;
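/*
 * The program-supplied message id (psmid) carries the caller's pid in
 * the upper 32 bits and a global sequence number in the lower 32 bits;
 * the AP bus uses it to match the asynchronous reply to this request.
 */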
301 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
302 atomic_inc_return(&zcrypt_step);
303 ap_msg.private = &work;
304 rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
305 if (rc)
306 goto out_free;
307 init_completion(&work);
308 ap_queue_message(zdev->ap_dev, &ap_msg);
309 rc = wait_for_completion_interruptible_timeout(
310 &work, CEX2A_CLEANUP_TIME);
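/* rc > 0: reply received, rc == 0: timed out, rc < 0: interrupted */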
311 if (rc > 0)
312 rc = convert_response(zdev, &ap_msg, mex->outputdata,
313 mex->outputdatalength);
314 else {
315 /* Signal pending or message timed out. */
316 ap_cancel_message(zdev->ap_dev, &ap_msg);
317 if (rc == 0)
318 /* Message timed out. */
319 rc = -ETIME;
320 }
321out_free:
322 kfree(ap_msg.message);
323 return rc;
324}
325
326/**
327 * The request distributor calls this function if it picked the CEX2A
328 * device to handle a modexpo_crt request.
329 * @zdev: pointer to zcrypt_device structure that identifies the
330 * CEX2A device to the request distributor
331 * @crt: pointer to the modexpo_crt request buffer
332 */
333static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
334 struct ica_rsa_modexpo_crt *crt)
335{
336 struct ap_message ap_msg;
337 struct completion work;
338 int rc;
339
340 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
341 if (!ap_msg.message)
342 return -ENOMEM;
343 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
344 atomic_inc_return(&zcrypt_step);
345 ap_msg.private = &work;
346 rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
347 if (rc)
348 goto out_free;
349 init_completion(&work);
350 ap_queue_message(zdev->ap_dev, &ap_msg);
351 rc = wait_for_completion_interruptible_timeout(
352 &work, CEX2A_CLEANUP_TIME);
353 if (rc > 0)
354 rc = convert_response(zdev, &ap_msg, crt->outputdata,
355 crt->outputdatalength);
356 else {
357 /* Signal pending or message timed out. */
358 ap_cancel_message(zdev->ap_dev, &ap_msg);
359 if (rc == 0)
360 /* Message timed out. */
361 rc = -ETIME;
362 }
363out_free:
364 kfree(ap_msg.message);
365 return rc;
366}
367
368/**
369 * The crypto operations for a CEX2A card.
370 */
371static struct zcrypt_ops zcrypt_cex2a_ops = {
372 .rsa_modexpo = zcrypt_cex2a_modexpo,
373 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
374};
375
376/**
377 * Probe function for CEX2A cards. It always accepts the AP device
378 * since the bus_match already checked the hardware type.
379 * @ap_dev: pointer to the AP device.
380 */
381static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
382{
383 struct zcrypt_device *zdev;
384 int rc;
385
386 zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
387 if (!zdev)
388 return -ENOMEM;
389 zdev->ap_dev = ap_dev;
390 zdev->ops = &zcrypt_cex2a_ops;
391 zdev->online = 1;
392 zdev->user_space_type = ZCRYPT_CEX2A;
393 zdev->type_string = "CEX2A";
394 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
395 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
396 zdev->short_crt = 1;
397 zdev->speed_rating = CEX2A_SPEED_RATING;
398 ap_dev->reply = &zdev->reply;
399 ap_dev->private = zdev;
400 rc = zcrypt_device_register(zdev);
401 if (rc)
402 goto out_free;
403 return 0;
404
405out_free:
406 ap_dev->private = NULL;
407 zcrypt_device_free(zdev);
408 return rc;
409}
410
411/**
412 * This is called to remove the extended CEX2A driver information
413 * if an AP device is removed.
414 */
415static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
416{
417 struct zcrypt_device *zdev = ap_dev->private;
418
419 zcrypt_device_unregister(zdev);
420}
421
422int __init zcrypt_cex2a_init(void)
423{
424 return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
425}
426
427void __exit zcrypt_cex2a_exit(void)
428{
429 ap_driver_unregister(&zcrypt_cex2a_driver);
430}
431
432#ifndef CONFIG_ZCRYPT_MONOLITHIC
433module_init(zcrypt_cex2a_init);
434module_exit(zcrypt_cex2a_exit);
435#endif
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
new file mode 100644
index 000000000000..8f69d1dacab8
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -0,0 +1,126 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cex2a.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_CEX2A_H_
29#define _ZCRYPT_CEX2A_H_
30
31/**
32 * The type 50 message family is associated with a CEX2A card.
33 *
34 * The four members of the family are described below.
35 *
36 * Note that all unsigned char arrays are right-justified and left-padded
37 * with zeroes.
38 *
39 * Note that all reserved fields must be zeroes.
40 */
41struct type50_hdr {
42 unsigned char reserved1;
43 unsigned char msg_type_code; /* 0x50 */
44 unsigned short msg_len;
45 unsigned char reserved2;
46 unsigned char ignored;
47 unsigned short reserved3;
48} __attribute__((packed));
49
50#define TYPE50_TYPE_CODE 0x50
51
52#define TYPE50_MEB1_FMT 0x0001
53#define TYPE50_MEB2_FMT 0x0002
54#define TYPE50_CRB1_FMT 0x0011
55#define TYPE50_CRB2_FMT 0x0012
56
57/* Mod-Exp, with a small modulus */
58struct type50_meb1_msg {
59 struct type50_hdr header;
60 unsigned short keyblock_type; /* 0x0001 */
61 unsigned char reserved[6];
62 unsigned char exponent[128];
63 unsigned char modulus[128];
64 unsigned char message[128];
65} __attribute__((packed));
66
67/* Mod-Exp, with a large modulus */
68struct type50_meb2_msg {
69 struct type50_hdr header;
70 unsigned short keyblock_type; /* 0x0002 */
71 unsigned char reserved[6];
72 unsigned char exponent[256];
73 unsigned char modulus[256];
74 unsigned char message[256];
75} __attribute__((packed));
76
77/* CRT, with a small modulus */
78struct type50_crb1_msg {
79 struct type50_hdr header;
80 unsigned short keyblock_type; /* 0x0011 */
81 unsigned char reserved[6];
82 unsigned char p[64];
83 unsigned char q[64];
84 unsigned char dp[64];
85 unsigned char dq[64];
86 unsigned char u[64];
87 unsigned char message[128];
88} __attribute__((packed));
89
90/* CRT, with a large modulus */
91struct type50_crb2_msg {
92 struct type50_hdr header;
93 unsigned short keyblock_type; /* 0x0012 */
94 unsigned char reserved[6];
95 unsigned char p[128];
96 unsigned char q[128];
97 unsigned char dp[128];
98 unsigned char dq[128];
99 unsigned char u[128];
100 unsigned char message[256];
101} __attribute__((packed));
102
103/**
104 * The type 80 response family is associated with a CEX2A card.
105 *
106 * Note that all unsigned char arrays are right-justified and left-padded
107 * with zeroes.
108 *
109 * Note that all reserved fields must be zeroes.
110 */
111
112#define TYPE80_RSP_CODE 0x80
113
114struct type80_hdr {
115 unsigned char reserved1;
116 unsigned char type; /* 0x80 */
117 unsigned short len;
118 unsigned char code; /* 0x00 */
119 unsigned char reserved2[3];
120 unsigned char reserved3[8];
121} __attribute__((packed));
122
123int zcrypt_cex2a_init(void);
124void zcrypt_cex2a_exit(void);
125
126#endif /* _ZCRYPT_CEX2A_H_ */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
new file mode 100644
index 000000000000..2cb616ba8bec
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -0,0 +1,133 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_error.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_ERROR_H_
29#define _ZCRYPT_ERROR_H_
30
31#include "zcrypt_api.h"
32
33/**
34 * Reply Messages
35 *
36 * Error reply messages are of two types:
37 * 82: Error (see below)
38 * 88: Error (see below)
39 * Both type 82 and type 88 have the same structure in the header.
40 *
41 * Request reply messages are of three known types:
42 * 80: Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
43 * 84: Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
44 * 86: Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
45 *
46 */
47struct error_hdr {
48 unsigned char reserved1; /* 0x00 */
49 unsigned char type; /* 0x82 or 0x88 */
50 unsigned char reserved2[2]; /* 0x0000 */
51 unsigned char reply_code; /* reply code */
52 unsigned char reserved3[3]; /* 0x000000 */
53};
54
55#define TYPE82_RSP_CODE 0x82
56#define TYPE88_RSP_CODE 0x88
57
58#define REP82_ERROR_MACHINE_FAILURE 0x10
59#define REP82_ERROR_PREEMPT_FAILURE 0x12
60#define REP82_ERROR_CHECKPT_FAILURE 0x14
61#define REP82_ERROR_MESSAGE_TYPE 0x20
62#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
63#define REP82_ERROR_INVALID_MSG_LEN 0x23
64#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
65#define REP82_ERROR_FORMAT_FIELD 0x29
66#define REP82_ERROR_INVALID_COMMAND 0x30
67#define REP82_ERROR_MALFORMED_MSG 0x40
68#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
69#define REP82_ERROR_WORD_ALIGNMENT 0x60
70#define REP82_ERROR_MESSAGE_LENGTH 0x80
71#define REP82_ERROR_OPERAND_INVALID 0x82
72#define REP82_ERROR_OPERAND_SIZE 0x84
73#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
74#define REP82_ERROR_RESERVED_FIELD 0x88
75#define REP82_ERROR_TRANSPORT_FAIL 0x90
76#define REP82_ERROR_PACKET_TRUNCATED 0xA0
77#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
78
79#define REP88_ERROR_MODULE_FAILURE 0x10
80
81#define REP88_ERROR_MESSAGE_TYPE 0x20
82#define REP88_ERROR_MESSAGE_MALFORMD 0x22
83#define REP88_ERROR_MESSAGE_LENGTH 0x23
84#define REP88_ERROR_RESERVED_FIELD 0x24
85#define REP88_ERROR_KEY_TYPE 0x34
86#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
87#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
88#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
89
90static inline int convert_error(struct zcrypt_device *zdev,
91 struct ap_message *reply)
92{
93 struct error_hdr *ehdr = reply->message;
94
95 PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n",
96 ehdr->type, *(unsigned int *) reply->message,
97 *(unsigned int *) (reply->message + 4));
98
99 switch (ehdr->reply_code) {
100 case REP82_ERROR_OPERAND_INVALID:
101 case REP82_ERROR_OPERAND_SIZE:
102 case REP82_ERROR_EVEN_MOD_IN_OPND:
103 case REP88_ERROR_MESSAGE_MALFORMD:
104 // REP88_ERROR_INVALID_KEY // '82' CEX2A
105 // REP88_ERROR_OPERAND // '84' CEX2A
106 // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
107 /* Invalid input data. */
108 return -EINVAL;
109 case REP82_ERROR_MESSAGE_TYPE:
110 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
111 /**
112 * Sending a message of the wrong type is a bug in the
113 * device driver. Warn about it, disable the device
114 * and then repeat the request.
115 */
116 WARN_ON(1);
117 zdev->online = 0;
118 return -EAGAIN;
119 case REP82_ERROR_TRANSPORT_FAIL:
120 case REP82_ERROR_MACHINE_FAILURE:
121 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
122 /* If a card fails disable it and repeat the request. */
123 zdev->online = 0;
124 return -EAGAIN;
125 default:
126 PRINTKW("unknown type %02x reply code = %d\n",
127 ehdr->type, ehdr->reply_code);
128 zdev->online = 0;
129 return -EAGAIN; /* repeat the request on a different device. */
130 }
131}
132
133#endif /* _ZCRYPT_ERROR_H_ */
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
new file mode 100644
index 000000000000..2a9349ad68b7
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -0,0 +1,100 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_mono.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/miscdevice.h>
32#include <linux/fs.h>
33#include <linux/proc_fs.h>
34#include <linux/compat.h>
35#include <asm/atomic.h>
36#include <asm/uaccess.h>
37
38#include "ap_bus.h"
39#include "zcrypt_api.h"
40#include "zcrypt_pcica.h"
41#include "zcrypt_pcicc.h"
42#include "zcrypt_pcixcc.h"
43#include "zcrypt_cex2a.h"
44
45/**
46 * The module initialization code.
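 * Registers the AP bus, the zcrypt API and each card driver in turn;
 * if any step fails, the parts already initialized are torn down again
 * in reverse order.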
47 */
48int __init zcrypt_init(void)
49{
50 int rc;
51
52 rc = ap_module_init();
53 if (rc)
54 goto out;
55 rc = zcrypt_api_init();
56 if (rc)
57 goto out_ap;
58 rc = zcrypt_pcica_init();
59 if (rc)
60 goto out_api;
61 rc = zcrypt_pcicc_init();
62 if (rc)
63 goto out_pcica;
64 rc = zcrypt_pcixcc_init();
65 if (rc)
66 goto out_pcicc;
67 rc = zcrypt_cex2a_init();
68 if (rc)
69 goto out_pcixcc;
70 return 0;
71
72out_pcixcc:
73 zcrypt_pcixcc_exit();
74out_pcicc:
75 zcrypt_pcicc_exit();
76out_pcica:
77 zcrypt_pcica_exit();
78out_api:
79 zcrypt_api_exit();
80out_ap:
81 ap_module_exit();
82out:
83 return rc;
84}
85
86/**
87 * The module termination code.
88 */
89void __exit zcrypt_exit(void)
90{
91 zcrypt_cex2a_exit();
92 zcrypt_pcixcc_exit();
93 zcrypt_pcicc_exit();
94 zcrypt_pcica_exit();
95 zcrypt_api_exit();
96 ap_module_exit();
97}
98
99module_init(zcrypt_init);
100module_exit(zcrypt_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
new file mode 100644
index 000000000000..b6a4ecdc8025
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -0,0 +1,418 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcica.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_pcica.h"
39
40#define PCICA_MIN_MOD_SIZE 1 /* 8 bits */
41#define PCICA_MAX_MOD_SIZE 256 /* 2048 bits */
42
43#define PCICA_SPEED_RATING 2800
44
45#define PCICA_MAX_MESSAGE_SIZE 0x3a0 /* sizeof(struct type4_lcr) */
46#define PCICA_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
47
48#define PCICA_CLEANUP_TIME (15*HZ)
49
50static struct ap_device_id zcrypt_pcica_ids[] = {
51 { AP_DEVICE(AP_DEVICE_TYPE_PCICA) },
52 { /* end of list */ },
53};
54
55#ifndef CONFIG_ZCRYPT_MONOLITHIC
56MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
57MODULE_AUTHOR("IBM Corporation");
58MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
59 "Copyright 2001, 2006 IBM Corporation");
60MODULE_LICENSE("GPL");
61#endif
62
63static int zcrypt_pcica_probe(struct ap_device *ap_dev);
64static void zcrypt_pcica_remove(struct ap_device *ap_dev);
65static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *,
66 struct ap_message *);
67
68static struct ap_driver zcrypt_pcica_driver = {
69 .probe = zcrypt_pcica_probe,
70 .remove = zcrypt_pcica_remove,
71 .receive = zcrypt_pcica_receive,
72 .ids = zcrypt_pcica_ids,
73};
74
75/**
76 * Convert an ICAMEX message to a type4 MEX message.
77 *
78 * @zdev: crypto device pointer
79 * @ap_msg: crypto request AP message
80 * @mex: pointer to user input data
81 *
82 * Returns 0 on success or -EFAULT.
83 */
84static int ICAMEX_msg_to_type4MEX_msg(struct zcrypt_device *zdev,
85 struct ap_message *ap_msg,
86 struct ica_rsa_modexpo *mex)
87{
88 unsigned char *modulus, *exponent, *message;
89 int mod_len;
90
91 mod_len = mex->inputdatalength;
92
93 if (mod_len <= 128) {
94 struct type4_sme *sme = ap_msg->message;
95 memset(sme, 0, sizeof(*sme));
96 ap_msg->length = sizeof(*sme);
97 sme->header.msg_fmt = TYPE4_SME_FMT;
98 sme->header.msg_len = sizeof(*sme);
99 sme->header.msg_type_code = TYPE4_TYPE_CODE;
100 sme->header.request_code = TYPE4_REQU_CODE;
101 modulus = sme->modulus + sizeof(sme->modulus) - mod_len;
102 exponent = sme->exponent + sizeof(sme->exponent) - mod_len;
103 message = sme->message + sizeof(sme->message) - mod_len;
104 } else {
105 struct type4_lme *lme = ap_msg->message;
106 memset(lme, 0, sizeof(*lme));
107 ap_msg->length = sizeof(*lme);
108 lme->header.msg_fmt = TYPE4_LME_FMT;
109 lme->header.msg_len = sizeof(*lme);
110 lme->header.msg_type_code = TYPE4_TYPE_CODE;
111 lme->header.request_code = TYPE4_REQU_CODE;
112 modulus = lme->modulus + sizeof(lme->modulus) - mod_len;
113 exponent = lme->exponent + sizeof(lme->exponent) - mod_len;
114 message = lme->message + sizeof(lme->message) - mod_len;
115 }
116
117 if (copy_from_user(modulus, mex->n_modulus, mod_len) ||
118 copy_from_user(exponent, mex->b_key, mod_len) ||
119 copy_from_user(message, mex->inputdata, mod_len))
120 return -EFAULT;
121 return 0;
122}
123
124/**
125 * Convert an ICACRT message to a type4 CRT message.
126 *
127 * @zdev: crypto device pointer
128 * @ap_msg: crypto request AP message
129 * @crt: pointer to user input data
130 *
131 * Returns 0 on success or -EFAULT.
132 */
133static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
134 struct ap_message *ap_msg,
135 struct ica_rsa_modexpo_crt *crt)
136{
137 unsigned char *p, *q, *dp, *dq, *u, *inp;
138 int mod_len, short_len, long_len;
139
140 mod_len = crt->inputdatalength;
141 short_len = mod_len / 2;
142 long_len = mod_len / 2 + 8;
143
144 if (mod_len <= 128) {
145 struct type4_scr *scr = ap_msg->message;
146 memset(scr, 0, sizeof(*scr));
147 ap_msg->length = sizeof(*scr);
148 scr->header.msg_type_code = TYPE4_TYPE_CODE;
149 scr->header.request_code = TYPE4_REQU_CODE;
150 scr->header.msg_fmt = TYPE4_SCR_FMT;
151 scr->header.msg_len = sizeof(*scr);
152 p = scr->p + sizeof(scr->p) - long_len;
153 q = scr->q + sizeof(scr->q) - short_len;
154 dp = scr->dp + sizeof(scr->dp) - long_len;
155 dq = scr->dq + sizeof(scr->dq) - short_len;
156 u = scr->u + sizeof(scr->u) - long_len;
157 inp = scr->message + sizeof(scr->message) - mod_len;
158 } else {
159 struct type4_lcr *lcr = ap_msg->message;
160 memset(lcr, 0, sizeof(*lcr));
161 ap_msg->length = sizeof(*lcr);
162 lcr->header.msg_type_code = TYPE4_TYPE_CODE;
163 lcr->header.request_code = TYPE4_REQU_CODE;
164 lcr->header.msg_fmt = TYPE4_LCR_FMT;
165 lcr->header.msg_len = sizeof(*lcr);
166 p = lcr->p + sizeof(lcr->p) - long_len;
167 q = lcr->q + sizeof(lcr->q) - short_len;
168 dp = lcr->dp + sizeof(lcr->dp) - long_len;
169 dq = lcr->dq + sizeof(lcr->dq) - short_len;
170 u = lcr->u + sizeof(lcr->u) - long_len;
171 inp = lcr->message + sizeof(lcr->message) - mod_len;
172 }
173
174 if (copy_from_user(p, crt->np_prime, long_len) ||
175 copy_from_user(q, crt->nq_prime, short_len) ||
176 copy_from_user(dp, crt->bp_key, long_len) ||
177 copy_from_user(dq, crt->bq_key, short_len) ||
178 copy_from_user(u, crt->u_mult_inv, long_len) ||
179 copy_from_user(inp, crt->inputdata, mod_len))
180 return -EFAULT;
181 return 0;
182}
183
184/**
185 * Copy results from a type 84 reply message back to user space.
186 *
187 * @zdev: crypto device pointer
188 * @reply: reply AP message.
189 * @outputdata: pointer to user output data
190 * @outputdatalength: size of user output data
191 *
192 * Returns 0 on success or -EFAULT.
193 */
194static inline int convert_type84(struct zcrypt_device *zdev,
195 struct ap_message *reply,
196 char __user *outputdata,
197 unsigned int outputdatalength)
198{
199 struct type84_hdr *t84h = reply->message;
200 char *data;
201
202 if (t84h->len < sizeof(*t84h) + outputdatalength) {
203 /* The result is too short, the PCICA card may not do that. */
204 zdev->online = 0;
205 return -EAGAIN; /* repeat the request on a different device. */
206 }
207 BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
208 data = reply->message + t84h->len - outputdatalength;
209 if (copy_to_user(outputdata, data, outputdatalength))
210 return -EFAULT;
211 return 0;
212}
213
214static int convert_response(struct zcrypt_device *zdev,
215 struct ap_message *reply,
216 char __user *outputdata,
217 unsigned int outputdatalength)
218{
219 /* Response type byte is the second byte in the response. */
220 switch (((unsigned char *) reply->message)[1]) {
221 case TYPE82_RSP_CODE:
222 case TYPE88_RSP_CODE:
223 return convert_error(zdev, reply);
224 case TYPE84_RSP_CODE:
225 return convert_type84(zdev, reply,
226 outputdata, outputdatalength);
227 default: /* Unknown response type, this should NEVER EVER happen */
228 PRINTK("Unrecognized Message Header: %08x%08x\n",
229 *(unsigned int *) reply->message,
230 *(unsigned int *) (reply->message+4));
231 zdev->online = 0;
232 return -EAGAIN; /* repeat the request on a different device. */
233 }
234}
235
236/**
237 * This function is called from the AP bus code after a crypto request
238 * "msg" has finished with the reply message "reply".
239 * It is called from tasklet context.
240 * @ap_dev: pointer to the AP device
241 * @msg: pointer to the AP message
242 * @reply: pointer to the AP reply message
243 */
244static void zcrypt_pcica_receive(struct ap_device *ap_dev,
245 struct ap_message *msg,
246 struct ap_message *reply)
247{
248 static struct error_hdr error_reply = {
249 .type = TYPE82_RSP_CODE,
250 .reply_code = REP82_ERROR_MACHINE_FAILURE,
251 };
252 struct type84_hdr *t84h = reply->message;
253 int length;
254
255 /* Copy the reply message to the request message buffer. */
256 if (IS_ERR(reply))
257 memcpy(msg->message, &error_reply, sizeof(error_reply));
258 else if (t84h->code == TYPE84_RSP_CODE) {
259 length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len);
260 memcpy(msg->message, reply->message, length);
261 } else
262 memcpy(msg->message, reply->message, sizeof error_reply);
263 complete((struct completion *) msg->private);
264}
265
266static atomic_t zcrypt_step = ATOMIC_INIT(0);
267
268/**
269 * The request distributor calls this function if it picked the PCICA
270 * device to handle a modexpo request.
271 * @zdev: pointer to zcrypt_device structure that identifies the
272 * PCICA device to the request distributor
273 * @mex: pointer to the modexpo request buffer
274 */
275static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
276 struct ica_rsa_modexpo *mex)
277{
278 struct ap_message ap_msg;
279 struct completion work;
280 int rc;
281
282 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
283 if (!ap_msg.message)
284 return -ENOMEM;
285 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
286 atomic_inc_return(&zcrypt_step);
287 ap_msg.private = &work;
288 rc = ICAMEX_msg_to_type4MEX_msg(zdev, &ap_msg, mex);
289 if (rc)
290 goto out_free;
291 init_completion(&work);
292 ap_queue_message(zdev->ap_dev, &ap_msg);
293 rc = wait_for_completion_interruptible_timeout(
294 &work, PCICA_CLEANUP_TIME);
295 if (rc > 0)
296 rc = convert_response(zdev, &ap_msg, mex->outputdata,
297 mex->outputdatalength);
298 else {
299 /* Signal pending or message timed out. */
300 ap_cancel_message(zdev->ap_dev, &ap_msg);
301 if (rc == 0)
302 /* Message timed out. */
303 rc = -ETIME;
304 }
305out_free:
306 kfree(ap_msg.message);
307 return rc;
308}
309
310/**
311 * The request distributor calls this function if it picked the PCICA
312 * device to handle a modexpo_crt request.
313 * @zdev: pointer to zcrypt_device structure that identifies the
314 * PCICA device to the request distributor
315 * @crt: pointer to the modexpo_crt request buffer
316 */
317static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
318 struct ica_rsa_modexpo_crt *crt)
319{
320 struct ap_message ap_msg;
321 struct completion work;
322 int rc;
323
324 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
325 if (!ap_msg.message)
326 return -ENOMEM;
327 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
328 atomic_inc_return(&zcrypt_step);
329 ap_msg.private = &work;
330 rc = ICACRT_msg_to_type4CRT_msg(zdev, &ap_msg, crt);
331 if (rc)
332 goto out_free;
333 init_completion(&work);
334 ap_queue_message(zdev->ap_dev, &ap_msg);
335 rc = wait_for_completion_interruptible_timeout(
336 &work, PCICA_CLEANUP_TIME);
337 if (rc > 0)
338 rc = convert_response(zdev, &ap_msg, crt->outputdata,
339 crt->outputdatalength);
340 else {
341 /* Signal pending or message timed out. */
342 ap_cancel_message(zdev->ap_dev, &ap_msg);
343 if (rc == 0)
344 /* Message timed out. */
345 rc = -ETIME;
346 }
347out_free:
348 kfree(ap_msg.message);
349 return rc;
350}
351
352/**
353 * The crypto operations for a PCICA card.
354 */
355static struct zcrypt_ops zcrypt_pcica_ops = {
356 .rsa_modexpo = zcrypt_pcica_modexpo,
357 .rsa_modexpo_crt = zcrypt_pcica_modexpo_crt,
358};
359
360/**
361 * Probe function for PCICA cards. It always accepts the AP device
362 * since the bus_match already checked the hardware type.
363 * @ap_dev: pointer to the AP device.
364 */
365static int zcrypt_pcica_probe(struct ap_device *ap_dev)
366{
367 struct zcrypt_device *zdev;
368 int rc;
369
370 zdev = zcrypt_device_alloc(PCICA_MAX_RESPONSE_SIZE);
371 if (!zdev)
372 return -ENOMEM;
373 zdev->ap_dev = ap_dev;
374 zdev->ops = &zcrypt_pcica_ops;
375 zdev->online = 1;
376 zdev->user_space_type = ZCRYPT_PCICA;
377 zdev->type_string = "PCICA";
378 zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
379 zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
380 zdev->speed_rating = PCICA_SPEED_RATING;
381 ap_dev->reply = &zdev->reply;
382 ap_dev->private = zdev;
383 rc = zcrypt_device_register(zdev);
384 if (rc)
385 goto out_free;
386 return 0;
387
388out_free:
389 ap_dev->private = NULL;
390 zcrypt_device_free(zdev);
391 return rc;
392}
393
394/**
395 * This is called to remove the extended PCICA driver information
396 * if an AP device is removed.
397 */
398static void zcrypt_pcica_remove(struct ap_device *ap_dev)
399{
400 struct zcrypt_device *zdev = ap_dev->private;
401
402 zcrypt_device_unregister(zdev);
403}
404
405int __init zcrypt_pcica_init(void)
406{
407 return ap_driver_register(&zcrypt_pcica_driver, THIS_MODULE, "pcica");
408}
409
410void zcrypt_pcica_exit(void)
411{
412 ap_driver_unregister(&zcrypt_pcica_driver);
413}
414
415#ifndef CONFIG_ZCRYPT_MONOLITHIC
416module_init(zcrypt_pcica_init);
417module_exit(zcrypt_pcica_exit);
418#endif
diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h
new file mode 100644
index 000000000000..3be11187f6df
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcica.h
@@ -0,0 +1,117 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcica.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCICA_H_
29#define _ZCRYPT_PCICA_H_
30
31/**
32 * The type 4 message family is associated with a PCICA card.
33 *
34 * The four members of the family are described below.
35 *
36 * Note that all unsigned char arrays are right-justified and left-padded
37 * with zeroes.
38 *
39 * Note that all reserved fields must be zeroes.
40 */
41struct type4_hdr {
42 unsigned char reserved1;
43 unsigned char msg_type_code; /* 0x04 */
44 unsigned short msg_len;
45 unsigned char request_code; /* 0x40 */
46 unsigned char msg_fmt;
47 unsigned short reserved2;
48} __attribute__((packed));
49
50#define TYPE4_TYPE_CODE 0x04
51#define TYPE4_REQU_CODE 0x40
52
53#define TYPE4_SME_FMT 0x00
54#define TYPE4_LME_FMT 0x10
55#define TYPE4_SCR_FMT 0x40
56#define TYPE4_LCR_FMT 0x50
57
58/* Mod-Exp, with a small modulus */
59struct type4_sme {
60 struct type4_hdr header;
61 unsigned char message[128];
62 unsigned char exponent[128];
63 unsigned char modulus[128];
64} __attribute__((packed));
65
66/* Mod-Exp, with a large modulus */
67struct type4_lme {
68 struct type4_hdr header;
69 unsigned char message[256];
70 unsigned char exponent[256];
71 unsigned char modulus[256];
72} __attribute__((packed));
73
74/* CRT, with a small modulus */
75struct type4_scr {
76 struct type4_hdr header;
77 unsigned char message[128];
78 unsigned char dp[72];
79 unsigned char dq[64];
80 unsigned char p[72];
81 unsigned char q[64];
82 unsigned char u[72];
83} __attribute__((packed));
84
85/* CRT, with a large modulus */
86struct type4_lcr {
87 struct type4_hdr header;
88 unsigned char message[256];
89 unsigned char dp[136];
90 unsigned char dq[128];
91 unsigned char p[136];
92 unsigned char q[128];
93 unsigned char u[136];
94} __attribute__((packed));
95
96/**
97 * The type 84 response family is associated with a PCICA card.
98 *
99 * Note that all unsigned char arrays are right-justified and left-padded
100 * with zeroes.
101 *
102 * Note that all reserved fields must be zeroes.
103 */
104
105struct type84_hdr {
106 unsigned char reserved1;
107 unsigned char code;
108 unsigned short len;
109 unsigned char reserved2[4];
110} __attribute__((packed));
111
112#define TYPE84_RSP_CODE 0x84
113
114int zcrypt_pcica_init(void);
115void zcrypt_pcica_exit(void);
116
117#endif /* _ZCRYPT_PCICA_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
new file mode 100644
index 000000000000..f295a403b29a
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -0,0 +1,630 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcicc.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_pcicc.h"
39#include "zcrypt_cca_key.h"
40
41#define PCICC_MIN_MOD_SIZE 64 /* 512 bits */
42#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
43#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
44
45/**
46 * PCICC cards need a speed rating of 0. This keeps them at the end of
47 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
48 * used if no other cards are present because they are slow and can only
49 * cope with PKCS12 padded requests. The logic is a bit odd: PKCS11 padded
50 * requests are rejected. The modexpo function encrypts PKCS12 padded data
51 * and decrypts any non-PKCS12 padded data (except PKCS11) on the assumption
52 * that it is encrypted PKCS12 data. The modexpo_crt function always decrypts
53 * the data on the assumption that it is PKCS12 encrypted data.
54 */
55#define PCICC_SPEED_RATING 0
56
57#define PCICC_MAX_MESSAGE_SIZE 0x710 /* max size type6 v1 crt message */
58#define PCICC_MAX_RESPONSE_SIZE 0x710 /* max size type86 v1 reply */
59
60#define PCICC_CLEANUP_TIME (15*HZ)
61
62static struct ap_device_id zcrypt_pcicc_ids[] = {
63 { AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
64 { /* end of list */ },
65};
66
67#ifndef CONFIG_ZCRYPT_MONOLITHIC
68MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
69MODULE_AUTHOR("IBM Corporation");
70MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
71 "Copyright 2001, 2006 IBM Corporation");
72MODULE_LICENSE("GPL");
73#endif
74
75static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
76static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
77static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
78 struct ap_message *);
79
80static struct ap_driver zcrypt_pcicc_driver = {
81 .probe = zcrypt_pcicc_probe,
82 .remove = zcrypt_pcicc_remove,
83 .receive = zcrypt_pcicc_receive,
84 .ids = zcrypt_pcicc_ids,
85};
86
87/**
88 * The following is used to initialize the CPRB passed to the PCICC card
89 * in a type6 message. The 3 fields that must be filled in at execution
90 * time are req_parml, rpl_parml and usage_domain. Note that all three
91 * fields are *little*-endian. Actually, everything about this interface
92 * is ascii/little-endian, since the device has 'Intel inside'.
93 *
94 * The CPRB is followed immediately by the parm block.
95 * The parm block contains:
96 * - function code ('PD' 0x5044 or 'PK' 0x504B)
97 * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD')
98 * - VUD block
99 */
100static struct CPRB static_cprb = {
101 .cprb_len = __constant_cpu_to_le16(0x0070),
102 .cprb_ver_id = 0x41,
103 .func_id = {0x54,0x32},
104 .checkpoint_flag= 0x01,
105 .svr_namel = __constant_cpu_to_le16(0x0008),
106 .svr_name = {'I','C','S','F',' ',' ',' ',' '}
107};
108
109/**
110 * Check the message for PKCS11 padding: 0x00 0x01, at least eight 0xFF bytes, then 0x00.
111 */
112static inline int is_PKCS11_padded(unsigned char *buffer, int length)
113{
114 int i;
115 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
116 return 0;
117 for (i = 2; i < length; i++)
118 if (buffer[i] != 0xFF)
119 break;
120 if (i < 10 || i == length)
121 return 0;
122 if (buffer[i] != 0x00)
123 return 0;
124 return 1;
125}
126
127/**
128 * Check the message for PKCS12 padding: 0x00 0x02, at least eight non-zero bytes, then 0x00.
129 */
130static inline int is_PKCS12_padded(unsigned char *buffer, int length)
131{
132 int i;
133 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
134 return 0;
135 for (i = 2; i < length; i++)
136 if (buffer[i] == 0x00)
137 break;
138 if ((i < 10) || (i == length))
139 return 0;
140 if (buffer[i] != 0x00)
141 return 0;
142 return 1;
143}
144
145/**
146 * Convert an ICAMEX message to a type6 MEX message.
147 *
148 * @zdev: crypto device pointer
149 * @ap_msg: crypto request AP message
150 * @mex: pointer to user input data
151 *
152 * Returns 0 on success or -EFAULT.
153 */
154static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
155 struct ap_message *ap_msg,
156 struct ica_rsa_modexpo *mex)
157{
158 static struct type6_hdr static_type6_hdr = {
159 .type = 0x06,
160 .offset1 = 0x00000058,
161 .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
162 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
163 .function_code = {'P','K'},
164 };
165 static struct function_and_rules_block static_pke_function_and_rules ={
166 .function_code = {'P','K'},
167 .ulen = __constant_cpu_to_le16(10),
168 .only_rule = {'P','K','C','S','-','1','.','2'}
169 };
170 struct {
171 struct type6_hdr hdr;
172 struct CPRB cprb;
173 struct function_and_rules_block fr;
174 unsigned short length;
175 char text[0];
176 } __attribute__((packed)) *msg = ap_msg->message;
177 int vud_len, pad_len, size;
178
179 /* VUD.ciphertext */
180 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
181 return -EFAULT;
182
183 if (is_PKCS11_padded(msg->text, mex->inputdatalength))
184 return -EINVAL;
185
186 /* static message header and f&r */
187 msg->hdr = static_type6_hdr;
188 msg->fr = static_pke_function_and_rules;
189
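/*
 * PKCS12 (block type 2) padded input is treated as cleartext to
 * encipher ('PK'): the padding is stripped here and the PKCS-1.2 rule
 * lets the card re-apply it. Anything else is treated as ciphertext
 * and the function code is switched to decipher ('PD').
 */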
190 if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
191 /* strip the padding and adjust the data length */
192 pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
193 if (pad_len <= 9 || pad_len >= mex->inputdatalength)
194 return -ENODEV;
195 vud_len = mex->inputdatalength - pad_len;
196 memmove(msg->text, msg->text + pad_len, vud_len);
197 msg->length = cpu_to_le16(vud_len + 2);
198
199 /* Set up key after the variable length text. */
200 size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
201 if (size < 0)
202 return size;
203 size += sizeof(*msg) + vud_len; /* total size of msg */
204 } else {
205 vud_len = mex->inputdatalength;
206 msg->length = cpu_to_le16(2 + vud_len);
207
208 msg->hdr.function_code[1] = 'D';
209 msg->fr.function_code[1] = 'D';
210
211 /* Set up key after the variable length text. */
212 size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
213 if (size < 0)
214 return size;
215 size += sizeof(*msg) + vud_len; /* total size of msg */
216 }
217
218 /* message header, cprb and f&r */
219 msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
220 msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
221
222 msg->cprb = static_cprb;
223 msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid);
224 msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
225 sizeof(msg->cprb));
226 msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);
227
228 ap_msg->length = (size + 3) & -4;
229 return 0;
230}
231
232/**
233 * Convert an ICACRT message to a type6 CRT message.
234 *
235 * @zdev: crypto device pointer
236 * @ap_msg: crypto request AP message
237 * @crt: pointer to user input data
238 *
239 * Returns 0 on success or -EFAULT.
240 */
241static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev,
242 struct ap_message *ap_msg,
243 struct ica_rsa_modexpo_crt *crt)
244{
245 static struct type6_hdr static_type6_hdr = {
246 .type = 0x06,
247 .offset1 = 0x00000058,
248 .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
249 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
250 .function_code = {'P','D'},
251 };
252 static struct function_and_rules_block static_pkd_function_and_rules ={
253 .function_code = {'P','D'},
254 .ulen = __constant_cpu_to_le16(10),
255 .only_rule = {'P','K','C','S','-','1','.','2'}
256 };
257 struct {
258 struct type6_hdr hdr;
259 struct CPRB cprb;
260 struct function_and_rules_block fr;
261 unsigned short length;
262 char text[0];
263 } __attribute__((packed)) *msg = ap_msg->message;
264 int size;
265
266 /* VUD.ciphertext */
267 msg->length = cpu_to_le16(2 + crt->inputdatalength);
268 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
269 return -EFAULT;
270
271 if (is_PKCS11_padded(msg->text, crt->inputdatalength))
272 return -EINVAL;
273
274 /* Set up key after the variable length text. */
275 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0);
276 if (size < 0)
277 return size;
278 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
279
280 /* message header, cprb and f&r */
281 msg->hdr = static_type6_hdr;
282 msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
283 msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
284
285 msg->cprb = static_cprb;
286 msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
287 msg->cprb.req_parml = msg->cprb.rpl_parml =
288 cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb));
289
290 msg->fr = static_pkd_function_and_rules;
291
292 ap_msg->length = (size + 3) & -4;
293 return 0;
294}
295
296/**
297 * Copy results from a type 86 reply message back to user space.
298 *
299 * @zdev: crypto device pointer
300 * @reply: reply AP message.
301 * @outputdata: pointer to user output data
302 * @outputdatalength: size of user output data
303 *
304 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
305 */
306struct type86_reply {
307 struct type86_hdr hdr;
308 struct type86_fmt2_ext fmt2;
309 struct CPRB cprb;
310 unsigned char pad[4]; /* 4 byte function code/rules block ? */
311 unsigned short length;
312 char text[0];
313} __attribute__((packed));
314
315static int convert_type86(struct zcrypt_device *zdev,
316 struct ap_message *reply,
317 char __user *outputdata,
318 unsigned int outputdatalength)
319{
320 static unsigned char static_pad[] = {
321 0x00,0x02,
322 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
323 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
324 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
325 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
326 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
327 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
328 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
329 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
330 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
331 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
332 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
333 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
334 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
335 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
336 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
337 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
338 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
339 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
340 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
341 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
342 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
343 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
344 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
345 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
346 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
347 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
348 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
349 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
350 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
351 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
352 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
353 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
354 };
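/*
 * static_pad above is a pre-built PKCS 1.2 padding block (0x00 0x02
 * followed by non-zero filler). When a deciphered result is shorter
 * than the modulus, pad_len-1 bytes of it plus a terminating 0x00 are
 * copied in front of the data further down.
 */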
355 struct type86_reply *msg = reply->message;
356 unsigned short service_rc, service_rs;
357 unsigned int reply_len, pad_len;
358 char *data;
359
360 service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
361 if (unlikely(service_rc != 0)) {
362 service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
363 if (service_rc == 8 && service_rs == 66) {
364 PDEBUG("Bad block format on PCICC\n");
365 return -EINVAL;
366 }
367 if (service_rc == 8 && service_rs == 65) {
368 PDEBUG("Probably an even modulus on PCICC\n");
369 return -EINVAL;
370 }
371 if (service_rc == 8 && service_rs == 770) {
372 PDEBUG("Invalid key length on PCICC\n");
373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
374 return -EAGAIN;
375 }
376 if (service_rc == 8 && service_rs == 783) {
377 PDEBUG("Extended bitlengths not enabled on PCICC\n");
378 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
379 return -EAGAIN;
380 }
381 PRINTK("Unknown service rc/rs (PCICC): %d/%d\n",
382 service_rc, service_rs);
383 zdev->online = 0;
384 return -EAGAIN; /* repeat the request on a different device. */
385 }
386 data = msg->text;
387 reply_len = le16_to_cpu(msg->length) - 2;
388 if (reply_len > outputdatalength)
389 return -EINVAL;
390 /**
391 * For all encipher requests, the length of the ciphertext (reply_len)
392 * will always equal the modulus length. For MEX decipher requests
393 * the output needs to get padded. Minimum pad size is 10.
394 *
395 * Currently, the cases where padding will be added are:
396 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
397 * ZERO-PAD and CRT is only supported for PKD requests)
398 * - PCICC, always
399 */
400 pad_len = outputdatalength - reply_len;
401 if (pad_len > 0) {
402 if (pad_len < 10)
403 return -EINVAL;
404 /* 'restore' padding left in the PCICC/PCIXCC card. */
405 if (copy_to_user(outputdata, static_pad, pad_len - 1))
406 return -EFAULT;
407 if (put_user(0, outputdata + pad_len - 1))
408 return -EFAULT;
409 }
410 /* Copy the crypto response to user space. */
411 if (copy_to_user(outputdata + pad_len, data, reply_len))
412 return -EFAULT;
413 return 0;
414}
415
416static int convert_response(struct zcrypt_device *zdev,
417 struct ap_message *reply,
418 char __user *outputdata,
419 unsigned int outputdatalength)
420{
421 struct type86_reply *msg = reply->message;
422
423 /* Response type byte is the second byte in the response. */
424 switch (msg->hdr.type) {
425 case TYPE82_RSP_CODE:
426 case TYPE88_RSP_CODE:
427 return convert_error(zdev, reply);
428 case TYPE86_RSP_CODE:
429 if (msg->hdr.reply_code)
430 return convert_error(zdev, reply);
431 if (msg->cprb.cprb_ver_id == 0x01)
432 return convert_type86(zdev, reply,
433 outputdata, outputdatalength);
434 /* no break, incorrect cprb version is an unknown response */
435 default: /* Unknown response type, this should NEVER EVER happen */
436 PRINTK("Unrecognized Message Header: %08x%08x\n",
437 *(unsigned int *) reply->message,
438 *(unsigned int *) (reply->message+4));
439 zdev->online = 0;
440 return -EAGAIN; /* repeat the request on a different device. */
441 }
442}
443
444/**
445 * This function is called from the AP bus code after a crypto request
446 * "msg" has finished with the reply message "reply".
447 * It is called from tasklet context.
448 * @ap_dev: pointer to the AP device
449 * @msg: pointer to the AP message
450 * @reply: pointer to the AP reply message
451 */
452static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
453 struct ap_message *msg,
454 struct ap_message *reply)
455{
456 static struct error_hdr error_reply = {
457 .type = TYPE82_RSP_CODE,
458 .reply_code = REP82_ERROR_MACHINE_FAILURE,
459 };
460 struct type86_reply *t86r = reply->message;
461 int length;
462
463 /* Copy the reply message to the request message buffer. */
464 if (IS_ERR(reply))
465 memcpy(msg->message, &error_reply, sizeof(error_reply));
466 else if (t86r->hdr.type == TYPE86_RSP_CODE &&
467 t86r->cprb.cprb_ver_id == 0x01) {
468 length = sizeof(struct type86_reply) + t86r->length - 2;
469 length = min(PCICC_MAX_RESPONSE_SIZE, length);
470 memcpy(msg->message, reply->message, length);
471 } else
472 memcpy(msg->message, reply->message, sizeof error_reply);
473 complete((struct completion *) msg->private);
474}
475
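/*
 * zcrypt_step supplies the low 32 bits of the program supplied message
 * id (psmid); the caller's pid forms the upper 32 bits, so each queued
 * request gets a unique id against which its reply can be matched.
 */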
476static atomic_t zcrypt_step = ATOMIC_INIT(0);
477
478/**
479 * The request distributor calls this function if it picked the PCICC
480 * device to handle a modexpo request.
481 * @zdev: pointer to zcrypt_device structure that identifies the
482 * PCICC device to the request distributor
483 * @mex: pointer to the modexpo request buffer
484 */
485static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
486 struct ica_rsa_modexpo *mex)
487{
488 struct ap_message ap_msg;
489 struct completion work;
490 int rc;
491
492 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
493 if (!ap_msg.message)
494 return -ENOMEM;
495 ap_msg.length = PAGE_SIZE;
496 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
497 atomic_inc_return(&zcrypt_step);
498 ap_msg.private = &work;
499 rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex);
500 if (rc)
501 goto out_free;
502 init_completion(&work);
503 ap_queue_message(zdev->ap_dev, &ap_msg);
504 rc = wait_for_completion_interruptible_timeout(
505 &work, PCICC_CLEANUP_TIME);
506 if (rc > 0)
507 rc = convert_response(zdev, &ap_msg, mex->outputdata,
508 mex->outputdatalength);
509 else {
510 /* Signal pending or message timed out. */
511 ap_cancel_message(zdev->ap_dev, &ap_msg);
512 if (rc == 0)
513 /* Message timed out. */
514 rc = -ETIME;
515 }
516out_free:
517 free_page((unsigned long) ap_msg.message);
518 return rc;
519}
520
521/**
522 * The request distributor calls this function if it picked the PCICC
523 * device to handle a modexpo_crt request.
524 * @zdev: pointer to zcrypt_device structure that identifies the
525 * PCICC device to the request distributor
 526	 * @crt: pointer to the modexpo_crt request buffer
527 */
528static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
529 struct ica_rsa_modexpo_crt *crt)
530{
531 struct ap_message ap_msg;
532 struct completion work;
533 int rc;
534
535 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
536 if (!ap_msg.message)
537 return -ENOMEM;
538 ap_msg.length = PAGE_SIZE;
539 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
540 atomic_inc_return(&zcrypt_step);
541 ap_msg.private = &work;
542 rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
543 if (rc)
544 goto out_free;
545 init_completion(&work);
546 ap_queue_message(zdev->ap_dev, &ap_msg);
547 rc = wait_for_completion_interruptible_timeout(
548 &work, PCICC_CLEANUP_TIME);
549 if (rc > 0)
550 rc = convert_response(zdev, &ap_msg, crt->outputdata,
551 crt->outputdatalength);
552 else {
553 /* Signal pending or message timed out. */
554 ap_cancel_message(zdev->ap_dev, &ap_msg);
555 if (rc == 0)
556 /* Message timed out. */
557 rc = -ETIME;
558 }
559out_free:
560 free_page((unsigned long) ap_msg.message);
561 return rc;
562}
563
564/**
565 * The crypto operations for a PCICC card.
566 */
567static struct zcrypt_ops zcrypt_pcicc_ops = {
568 .rsa_modexpo = zcrypt_pcicc_modexpo,
569 .rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
570};
571
572/**
573 * Probe function for PCICC cards. It always accepts the AP device
574 * since the bus_match already checked the hardware type.
575 * @ap_dev: pointer to the AP device.
576 */
577static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
578{
579 struct zcrypt_device *zdev;
580 int rc;
581
582 zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE);
583 if (!zdev)
584 return -ENOMEM;
585 zdev->ap_dev = ap_dev;
586 zdev->ops = &zcrypt_pcicc_ops;
587 zdev->online = 1;
588 zdev->user_space_type = ZCRYPT_PCICC;
589 zdev->type_string = "PCICC";
590 zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
591 zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
592 zdev->speed_rating = PCICC_SPEED_RATING;
593 ap_dev->reply = &zdev->reply;
594 ap_dev->private = zdev;
595 rc = zcrypt_device_register(zdev);
596 if (rc)
597 goto out_free;
598 return 0;
599
600 out_free:
601 ap_dev->private = NULL;
602 zcrypt_device_free(zdev);
603 return rc;
604}
605
606/**
607 * This is called to remove the extended PCICC driver information
608 * if an AP device is removed.
609 */
610static void zcrypt_pcicc_remove(struct ap_device *ap_dev)
611{
612 struct zcrypt_device *zdev = ap_dev->private;
613
614 zcrypt_device_unregister(zdev);
615}
616
617int __init zcrypt_pcicc_init(void)
618{
619 return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc");
620}
621
622void zcrypt_pcicc_exit(void)
623{
624 ap_driver_unregister(&zcrypt_pcicc_driver);
625}
626
627#ifndef CONFIG_ZCRYPT_MONOLITHIC
628module_init(zcrypt_pcicc_init);
629module_exit(zcrypt_pcicc_exit);
630#endif
diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h
new file mode 100644
index 000000000000..6d4454846c8f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcicc.h
@@ -0,0 +1,176 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcicc.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCICC_H_
29#define _ZCRYPT_PCICC_H_
30
31/**
32 * The type 6 message family is associated with PCICC or PCIXCC cards.
33 *
34 * It contains a message header followed by a CPRB, both of which
35 * are described below.
36 *
37 * Note that all reserved fields must be zeroes.
38 */
39struct type6_hdr {
40 unsigned char reserved1; /* 0x00 */
41 unsigned char type; /* 0x06 */
42 unsigned char reserved2[2]; /* 0x0000 */
43 unsigned char right[4]; /* 0x00000000 */
44 unsigned char reserved3[2]; /* 0x0000 */
45 unsigned char reserved4[2]; /* 0x0000 */
46 unsigned char apfs[4]; /* 0x00000000 */
47 unsigned int offset1; /* 0x00000058 (offset to CPRB) */
48 unsigned int offset2; /* 0x00000000 */
49 unsigned int offset3; /* 0x00000000 */
50 unsigned int offset4; /* 0x00000000 */
51 unsigned char agent_id[16]; /* PCICC: */
52 /* 0x0100 */
53 /* 0x4343412d4150504c202020 */
54 /* 0x010101 */
55 /* PCIXCC: */
56 /* 0x4341000000000000 */
57 /* 0x0000000000000000 */
58 unsigned char rqid[2]; /* rqid. internal to 603 */
59 unsigned char reserved5[2]; /* 0x0000 */
60 unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
61 unsigned char reserved6[2]; /* 0x0000 */
62 unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
63 unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
64 unsigned int ToCardLen3; /* 0x00000000 */
65 unsigned int ToCardLen4; /* 0x00000000 */
66 unsigned int FromCardLen1; /* response buffer length */
67 unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
68 unsigned int FromCardLen3; /* 0x00000000 */
69 unsigned int FromCardLen4; /* 0x00000000 */
70} __attribute__((packed));
71
72/**
73 * CPRB
74 * Note that all shorts, ints and longs are little-endian.
  75	 * All pointer fields are 32 bits long, and mean nothing
76 *
77 * A request CPRB is followed by a request_parameter_block.
78 *
79 * The request (or reply) parameter block is organized thus:
80 * function code
81 * VUD block
82 * key block
83 */
84struct CPRB {
85 unsigned short cprb_len; /* CPRB length */
86 unsigned char cprb_ver_id; /* CPRB version id. */
87 unsigned char pad_000; /* Alignment pad byte. */
88 unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
89 unsigned char srpi_verb; /* SRPI verb type */
90 unsigned char flags; /* flags */
91 unsigned char func_id[2]; /* function id */
92 unsigned char checkpoint_flag; /* */
93 unsigned char resv2; /* reserved */
94 unsigned short req_parml; /* request parameter buffer */
95 /* length 16-bit little endian */
96 unsigned char req_parmp[4]; /* request parameter buffer *
97 * pointer (means nothing: the *
98 * parameter buffer follows *
99 * the CPRB). */
100 unsigned char req_datal[4]; /* request data buffer */
101 /* length ULELONG */
102 unsigned char req_datap[4]; /* request data buffer */
103 /* pointer */
104 unsigned short rpl_parml; /* reply parameter buffer */
105 /* length 16-bit little endian */
106 unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
107 unsigned char rpl_parmp[4]; /* reply parameter buffer *
108 * pointer (means nothing: the *
109 * parameter buffer follows *
110 * the CPRB). */
111 unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
112 unsigned char rpl_datap[4]; /* reply data buffer */
113 /* pointer */
114 unsigned short ccp_rscode; /* server reason code ULESHORT */
115 unsigned short ccp_rtcode; /* server return code ULESHORT */
116 unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
117 unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
118 unsigned char repd_datal[4]; /* replied data length ULELONG */
119 unsigned char req_pc[2]; /* PC identifier */
120 unsigned char res_origin[8]; /* resource origin */
121 unsigned char mac_value[8]; /* Mac Value */
122 unsigned char logon_id[8]; /* Logon Identifier */
123 unsigned char usage_domain[2]; /* cdx */
124 unsigned char resv3[18]; /* reserved for requestor */
125 unsigned short svr_namel; /* server name length ULESHORT */
126 unsigned char svr_name[8]; /* server name */
127} __attribute__((packed));
128
129/**
130 * The type 86 message family is associated with PCICC and PCIXCC cards.
131 *
132 * It contains a message header followed by a CPRB. The CPRB is
133 * the same as the request CPRB, which is described above.
134 *
135 * If format is 1, an error condition exists and no data beyond
136 * the 8-byte message header is of interest.
137 *
138 * The non-error message is shown below.
139 *
140 * Note that all reserved fields must be zeroes.
141 */
142struct type86_hdr {
143 unsigned char reserved1; /* 0x00 */
144 unsigned char type; /* 0x86 */
145 unsigned char format; /* 0x01 (error) or 0x02 (ok) */
146 unsigned char reserved2; /* 0x00 */
147 unsigned char reply_code; /* reply code (see above) */
148 unsigned char reserved3[3]; /* 0x000000 */
149} __attribute__((packed));
150
151#define TYPE86_RSP_CODE 0x86
152#define TYPE86_FMT2 0x02
153
154struct type86_fmt2_ext {
155 unsigned char reserved[4]; /* 0x00000000 */
156 unsigned char apfs[4]; /* final status */
157 unsigned int count1; /* length of CPRB + parameters */
158 unsigned int offset1; /* offset to CPRB */
159 unsigned int count2; /* 0x00000000 */
160 unsigned int offset2; /* db offset 0x00000000 for PKD */
161 unsigned int count3; /* 0x00000000 */
162 unsigned int offset3; /* 0x00000000 */
163 unsigned int count4; /* 0x00000000 */
164 unsigned int offset4; /* 0x00000000 */
165} __attribute__((packed));
166
167struct function_and_rules_block {
168 unsigned char function_code[2];
169 unsigned short ulen;
170 unsigned char only_rule[8];
171} __attribute__((packed));
172
173int zcrypt_pcicc_init(void);
174void zcrypt_pcicc_exit(void);
175
176#endif /* _ZCRYPT_PCICC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
new file mode 100644
index 000000000000..2da8b9381407
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -0,0 +1,951 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcixcc.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <linux/delay.h>
33#include <asm/atomic.h>
34#include <asm/uaccess.h>
35
36#include "ap_bus.h"
37#include "zcrypt_api.h"
38#include "zcrypt_error.h"
39#include "zcrypt_pcicc.h"
40#include "zcrypt_pcixcc.h"
41#include "zcrypt_cca_key.h"
42
43#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
44#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
45#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
46
47#define PCIXCC_MCL2_SPEED_RATING 7870 /* FIXME: needs finetuning */
48#define PCIXCC_MCL3_SPEED_RATING 7870
49#define CEX2C_SPEED_RATING 8540
50
51#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
52#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
53
54#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
55#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
56#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
57#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
58
59#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
60
61#define PCIXCC_CLEANUP_TIME (15*HZ)
62
63#define CEIL4(x) ((((x)+3)/4)*4)
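/* e.g. CEIL4(13) == 16, CEIL4(16) == 16: round up to a multiple of 4 */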
64
65struct response_type {
66 struct completion work;
67 int type;
68};
69#define PCIXCC_RESPONSE_TYPE_ICA 0
70#define PCIXCC_RESPONSE_TYPE_XCRB 1
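/*
 * The response type in the struct response_type hung off ap_msg.private
 * tells zcrypt_pcixcc_receive() how much of the card's reply to copy
 * back: an ICA reply is bounded by the type86 length field, an XCRB
 * reply by its fmt2 offsets and counts.
 */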
71
72static struct ap_device_id zcrypt_pcixcc_ids[] = {
73 { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
74 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
75 { /* end of list */ },
76};
77
78#ifndef CONFIG_ZCRYPT_MONOLITHIC
79MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
80MODULE_AUTHOR("IBM Corporation");
81MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
82 "Copyright 2001, 2006 IBM Corporation");
83MODULE_LICENSE("GPL");
84#endif
85
86static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
87static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
88static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
89 struct ap_message *);
90
91static struct ap_driver zcrypt_pcixcc_driver = {
92 .probe = zcrypt_pcixcc_probe,
93 .remove = zcrypt_pcixcc_remove,
94 .receive = zcrypt_pcixcc_receive,
95 .ids = zcrypt_pcixcc_ids,
96};
97
98/**
99 * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
100 * card in a type6 message. The 3 fields that must be filled in at execution
 101	 * time are req_parml, rpl_msgbl and domain.
102 * Everything about this interface is ascii/big-endian, since the
103 * device does *not* have 'Intel inside'.
104 *
105 * The CPRBX is followed immediately by the parm block.
106 * The parm block contains:
107 * - function code ('PD' 0x5044 or 'PK' 0x504B)
108 * - rule block (one of:)
109 * + 0x000A 'PKCS-1.2' (MCL2 'PD')
110 * + 0x000A 'ZERO-PAD' (MCL2 'PK')
111 * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
112 * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
113 * - VUD block
114 */
115static struct CPRBX static_cprbx = {
116 .cprb_len = 0x00DC,
117 .cprb_ver_id = 0x02,
118 .func_id = {0x54,0x32},
119};
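/*
 * cprb_len 0x00DC is the fixed 220-byte CPRBX length and func_id
 * 0x54,0x32 is the ascii pair 'T','2'; req_parml, rpl_msgbl and domain
 * are filled in per request by the conversion functions below.
 */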
120
121/**
 122	 * Convert an ICAMEX message to a type6 MEX message.
123 *
124 * @zdev: crypto device pointer
125 * @ap_msg: pointer to AP message
126 * @mex: pointer to user input data
127 *
128 * Returns 0 on success or -EFAULT.
129 */
130static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
131 struct ap_message *ap_msg,
132 struct ica_rsa_modexpo *mex)
133{
134 static struct type6_hdr static_type6_hdrX = {
135 .type = 0x06,
136 .offset1 = 0x00000058,
137 .agent_id = {'C','A',},
138 .function_code = {'P','K'},
139 };
140 static struct function_and_rules_block static_pke_fnr = {
141 .function_code = {'P','K'},
142 .ulen = 10,
143 .only_rule = {'M','R','P',' ',' ',' ',' ',' '}
144 };
145 static struct function_and_rules_block static_pke_fnr_MCL2 = {
146 .function_code = {'P','K'},
147 .ulen = 10,
148 .only_rule = {'Z','E','R','O','-','P','A','D'}
149 };
150 struct {
151 struct type6_hdr hdr;
152 struct CPRBX cprbx;
153 struct function_and_rules_block fr;
154 unsigned short length;
155 char text[0];
156 } __attribute__((packed)) *msg = ap_msg->message;
157 int size;
158
159 /* VUD.ciphertext */
160 msg->length = mex->inputdatalength + 2;
161 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
162 return -EFAULT;
163
164 /* Set up key which is located after the variable length text. */
165 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
166 if (size < 0)
167 return size;
168 size += sizeof(*msg) + mex->inputdatalength;
169
170 /* message header, cprbx and f&r */
171 msg->hdr = static_type6_hdrX;
172 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
173 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
174
175 msg->cprbx = static_cprbx;
176 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
177 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
178
179 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
180 static_pke_fnr_MCL2 : static_pke_fnr;
181
182 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
183
184 ap_msg->length = size;
185 return 0;
186}
187
188/**
 189	 * Convert an ICACRT message to a type6 CRT message.
190 *
191 * @zdev: crypto device pointer
192 * @ap_msg: pointer to AP message
193 * @crt: pointer to user input data
194 *
195 * Returns 0 on success or -EFAULT.
196 */
197static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
198 struct ap_message *ap_msg,
199 struct ica_rsa_modexpo_crt *crt)
200{
201 static struct type6_hdr static_type6_hdrX = {
202 .type = 0x06,
203 .offset1 = 0x00000058,
204 .agent_id = {'C','A',},
205 .function_code = {'P','D'},
206 };
207 static struct function_and_rules_block static_pkd_fnr = {
208 .function_code = {'P','D'},
209 .ulen = 10,
210 .only_rule = {'Z','E','R','O','-','P','A','D'}
211 };
212
213 static struct function_and_rules_block static_pkd_fnr_MCL2 = {
214 .function_code = {'P','D'},
215 .ulen = 10,
216 .only_rule = {'P','K','C','S','-','1','.','2'}
217 };
218 struct {
219 struct type6_hdr hdr;
220 struct CPRBX cprbx;
221 struct function_and_rules_block fr;
222 unsigned short length;
223 char text[0];
224 } __attribute__((packed)) *msg = ap_msg->message;
225 int size;
226
227 /* VUD.ciphertext */
228 msg->length = crt->inputdatalength + 2;
229 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
230 return -EFAULT;
231
232 /* Set up key which is located after the variable length text. */
233 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
234 if (size < 0)
235 return size;
236 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
237
238 /* message header, cprbx and f&r */
239 msg->hdr = static_type6_hdrX;
240 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
241 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
242
243 msg->cprbx = static_cprbx;
244 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
245 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
246 size - sizeof(msg->hdr) - sizeof(msg->cprbx);
247
248 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
249 static_pkd_fnr_MCL2 : static_pkd_fnr;
250
251 ap_msg->length = size;
252 return 0;
253}
254
255/**
 256	 * Convert an XCRB message to a type6 CPRB message.
257 *
258 * @zdev: crypto device pointer
259 * @ap_msg: pointer to AP message
260 * @xcRB: pointer to user input data
261 *
262 * Returns 0 on success or -EFAULT.
263 */
264struct type86_fmt2_msg {
265 struct type86_hdr hdr;
266 struct type86_fmt2_ext fmt2;
267} __attribute__((packed));
268
269static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
270 struct ap_message *ap_msg,
271 struct ica_xcRB *xcRB)
272{
273 static struct type6_hdr static_type6_hdrX = {
274 .type = 0x06,
275 .offset1 = 0x00000058,
276 };
277 struct {
278 struct type6_hdr hdr;
279 struct ica_CPRBX cprbx;
280 } __attribute__((packed)) *msg = ap_msg->message;
281
282 int rcblen = CEIL4(xcRB->request_control_blk_length);
283 int replylen;
284 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
285 char *function_code;
286
287 /* length checks */
288 ap_msg->length = sizeof(struct type6_hdr) +
289 CEIL4(xcRB->request_control_blk_length) +
290 xcRB->request_data_length;
291 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) {
292 PRINTK("Combined message is too large (%ld/%d/%d).\n",
293 sizeof(struct type6_hdr),
294 xcRB->request_control_blk_length,
295 xcRB->request_data_length);
296 return -EFAULT;
297 }
298 if (CEIL4(xcRB->reply_control_blk_length) >
299 PCIXCC_MAX_XCRB_REPLY_SIZE) {
300 PDEBUG("Reply CPRB length is too large (%d).\n",
 301		       xcRB->reply_control_blk_length);
302 return -EFAULT;
303 }
304 if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) {
305 PDEBUG("Reply data block length is too large (%d).\n",
306 xcRB->reply_data_length);
307 return -EFAULT;
308 }
309 replylen = CEIL4(xcRB->reply_control_blk_length) +
310 CEIL4(xcRB->reply_data_length) +
311 sizeof(struct type86_fmt2_msg);
312 if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
313 PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE"
314 " (%d/%d/%d).\n",
315 sizeof(struct type86_fmt2_msg),
316 xcRB->reply_control_blk_length,
317 xcRB->reply_data_length);
318 xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
319 (sizeof(struct type86_fmt2_msg) +
320 CEIL4(xcRB->reply_data_length));
321 PDEBUG("Capping Reply CPRB length at %d\n",
322 xcRB->reply_control_blk_length);
323 }
324
325 /* prepare type6 header */
326 msg->hdr = static_type6_hdrX;
327 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
328 msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
329 if (xcRB->request_data_length) {
330 msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
331 msg->hdr.ToCardLen2 = xcRB->request_data_length;
332 }
333 msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
334 msg->hdr.FromCardLen2 = xcRB->reply_data_length;
335
336 /* prepare CPRB */
337 if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
338 xcRB->request_control_blk_length))
339 return -EFAULT;
340 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
341 xcRB->request_control_blk_length) {
342 PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len,
343 xcRB->request_control_blk_length);
344 return -EFAULT;
345 }
346 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
347 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
348
349 /* copy data block */
350 if (xcRB->request_data_length &&
351 copy_from_user(req_data, xcRB->request_data_address,
352 xcRB->request_data_length))
353 return -EFAULT;
354 return 0;
355}
356
357/**
358 * Copy results from a type 86 ICA reply message back to user space.
359 *
360 * @zdev: crypto device pointer
361 * @reply: reply AP message.
362 * @data: pointer to user output data
363 * @length: size of user output data
364 *
365 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
366 */
367struct type86x_reply {
368 struct type86_hdr hdr;
369 struct type86_fmt2_ext fmt2;
370 struct CPRBX cprbx;
371 unsigned char pad[4]; /* 4 byte function code/rules block ? */
372 unsigned short length;
373 char text[0];
374} __attribute__((packed));
375
376static int convert_type86_ica(struct zcrypt_device *zdev,
377 struct ap_message *reply,
378 char __user *outputdata,
379 unsigned int outputdatalength)
380{
381 static unsigned char static_pad[] = {
382 0x00,0x02,
383 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
384 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
385 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
386 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
387 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
388 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
389 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
390 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
391 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
392 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
393 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
394 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
395 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
396 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
397 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
398 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
399 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
400 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
401 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
402 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
403 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
404 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
405 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
406 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
407 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
408 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
409 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
410 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
411 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
412 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
413 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
414 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
415 };
416 struct type86x_reply *msg = reply->message;
417 unsigned short service_rc, service_rs;
418 unsigned int reply_len, pad_len;
419 char *data;
420
421 service_rc = msg->cprbx.ccp_rtcode;
422 if (unlikely(service_rc != 0)) {
423 service_rs = msg->cprbx.ccp_rscode;
424 if (service_rc == 8 && service_rs == 66) {
425 PDEBUG("Bad block format on PCIXCC/CEX2C\n");
426 return -EINVAL;
427 }
428 if (service_rc == 8 && service_rs == 65) {
429 PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n");
430 return -EINVAL;
431 }
432 if (service_rc == 8 && service_rs == 770) {
433 PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
434 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
435 return -EAGAIN;
436 }
437 if (service_rc == 8 && service_rs == 783) {
438 PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
439 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
440 return -EAGAIN;
441 }
442 PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
443 service_rc, service_rs);
444 zdev->online = 0;
445 return -EAGAIN; /* repeat the request on a different device. */
446 }
447 data = msg->text;
448 reply_len = msg->length - 2;
449 if (reply_len > outputdatalength)
450 return -EINVAL;
451 /**
452 * For all encipher requests, the length of the ciphertext (reply_len)
453 * will always equal the modulus length. For MEX decipher requests
454 * the output needs to get padded. Minimum pad size is 10.
455 *
 456	 * Currently, the cases where padding will be added are:
457 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
458 * ZERO-PAD and CRT is only supported for PKD requests)
459 * - PCICC, always
460 */
461 pad_len = outputdatalength - reply_len;
462 if (pad_len > 0) {
463 if (pad_len < 10)
464 return -EINVAL;
465 /* 'restore' padding left in the PCICC/PCIXCC card. */
466 if (copy_to_user(outputdata, static_pad, pad_len - 1))
467 return -EFAULT;
468 if (put_user(0, outputdata + pad_len - 1))
469 return -EFAULT;
470 }
471 /* Copy the crypto response to user space. */
472 if (copy_to_user(outputdata + pad_len, data, reply_len))
473 return -EFAULT;
474 return 0;
475}
476
477/**
478 * Copy results from a type 86 XCRB reply message back to user space.
479 *
480 * @zdev: crypto device pointer
481 * @reply: reply AP message.
482 * @xcRB: pointer to XCRB
483 *
484 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
485 */
486static int convert_type86_xcrb(struct zcrypt_device *zdev,
487 struct ap_message *reply,
488 struct ica_xcRB *xcRB)
489{
490 struct type86_fmt2_msg *msg = reply->message;
491 char *data = reply->message;
492
493 /* Copy CPRB to user */
494 if (copy_to_user(xcRB->reply_control_blk_addr,
495 data + msg->fmt2.offset1, msg->fmt2.count1))
496 return -EFAULT;
497 xcRB->reply_control_blk_length = msg->fmt2.count1;
498
499 /* Copy data buffer to user */
500 if (msg->fmt2.count2)
501 if (copy_to_user(xcRB->reply_data_addr,
502 data + msg->fmt2.offset2, msg->fmt2.count2))
503 return -EFAULT;
504 xcRB->reply_data_length = msg->fmt2.count2;
505 return 0;
506}
507
508static int convert_response_ica(struct zcrypt_device *zdev,
509 struct ap_message *reply,
510 char __user *outputdata,
511 unsigned int outputdatalength)
512{
513 struct type86x_reply *msg = reply->message;
514
515 /* Response type byte is the second byte in the response. */
516 switch (((unsigned char *) reply->message)[1]) {
517 case TYPE82_RSP_CODE:
518 case TYPE88_RSP_CODE:
519 return convert_error(zdev, reply);
520 case TYPE86_RSP_CODE:
521 if (msg->hdr.reply_code)
522 return convert_error(zdev, reply);
523 if (msg->cprbx.cprb_ver_id == 0x02)
524 return convert_type86_ica(zdev, reply,
525 outputdata, outputdatalength);
526 /* no break, incorrect cprb version is an unknown response */
527 default: /* Unknown response type, this should NEVER EVER happen */
528 PRINTK("Unrecognized Message Header: %08x%08x\n",
529 *(unsigned int *) reply->message,
530 *(unsigned int *) (reply->message+4));
531 zdev->online = 0;
532 return -EAGAIN; /* repeat the request on a different device. */
533 }
534}
535
536static int convert_response_xcrb(struct zcrypt_device *zdev,
537 struct ap_message *reply,
538 struct ica_xcRB *xcRB)
539{
540 struct type86x_reply *msg = reply->message;
541
542 /* Response type byte is the second byte in the response. */
543 switch (((unsigned char *) reply->message)[1]) {
544 case TYPE82_RSP_CODE:
545 case TYPE88_RSP_CODE:
546 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
547 return convert_error(zdev, reply);
548 case TYPE86_RSP_CODE:
549 if (msg->hdr.reply_code) {
550 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
551 return convert_error(zdev, reply);
552 }
553 if (msg->cprbx.cprb_ver_id == 0x02)
554 return convert_type86_xcrb(zdev, reply, xcRB);
555 /* no break, incorrect cprb version is an unknown response */
556 default: /* Unknown response type, this should NEVER EVER happen */
557 PRINTK("Unrecognized Message Header: %08x%08x\n",
558 *(unsigned int *) reply->message,
559 *(unsigned int *) (reply->message+4));
560 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
561 zdev->online = 0;
562 return -EAGAIN; /* repeat the request on a different device. */
563 }
564}
565
566/**
567 * This function is called from the AP bus code after a crypto request
568 * "msg" has finished with the reply message "reply".
569 * It is called from tasklet context.
570 * @ap_dev: pointer to the AP device
571 * @msg: pointer to the AP message
572 * @reply: pointer to the AP reply message
573 */
574static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
575 struct ap_message *msg,
576 struct ap_message *reply)
577{
578 static struct error_hdr error_reply = {
579 .type = TYPE82_RSP_CODE,
580 .reply_code = REP82_ERROR_MACHINE_FAILURE,
581 };
582 struct response_type *resp_type =
583 (struct response_type *) msg->private;
584 struct type86x_reply *t86r = reply->message;
585 int length;
586
587 /* Copy the reply message to the request message buffer. */
588 if (IS_ERR(reply))
589 memcpy(msg->message, &error_reply, sizeof(error_reply));
590 else if (t86r->hdr.type == TYPE86_RSP_CODE &&
591 t86r->cprbx.cprb_ver_id == 0x02) {
592 switch (resp_type->type) {
593 case PCIXCC_RESPONSE_TYPE_ICA:
594 length = sizeof(struct type86x_reply)
595 + t86r->length - 2;
596 length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
597 memcpy(msg->message, reply->message, length);
598 break;
599 case PCIXCC_RESPONSE_TYPE_XCRB:
600 length = t86r->fmt2.offset2 + t86r->fmt2.count2;
601 length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
602 memcpy(msg->message, reply->message, length);
603 break;
604 default:
605 PRINTK("Invalid internal response type: %i\n",
606 resp_type->type);
607 memcpy(msg->message, &error_reply,
608 sizeof error_reply);
609 }
610 } else
611 memcpy(msg->message, reply->message, sizeof error_reply);
612 complete(&(resp_type->work));
613}
614
615static atomic_t zcrypt_step = ATOMIC_INIT(0);
616
617/**
618 * The request distributor calls this function if it picked the PCIXCC/CEX2C
619 * device to handle a modexpo request.
620 * @zdev: pointer to zcrypt_device structure that identifies the
621 * PCIXCC/CEX2C device to the request distributor
622 * @mex: pointer to the modexpo request buffer
623 */
624static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
625 struct ica_rsa_modexpo *mex)
626{
627 struct ap_message ap_msg;
628 struct response_type resp_type = {
629 .type = PCIXCC_RESPONSE_TYPE_ICA,
630 };
631 int rc;
632
633 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
634 if (!ap_msg.message)
635 return -ENOMEM;
636 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
637 atomic_inc_return(&zcrypt_step);
638 ap_msg.private = &resp_type;
639 rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
640 if (rc)
641 goto out_free;
642 init_completion(&resp_type.work);
643 ap_queue_message(zdev->ap_dev, &ap_msg);
644 rc = wait_for_completion_interruptible_timeout(
645 &resp_type.work, PCIXCC_CLEANUP_TIME);
646 if (rc > 0)
647 rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
648 mex->outputdatalength);
649 else {
650 /* Signal pending or message timed out. */
651 ap_cancel_message(zdev->ap_dev, &ap_msg);
652 if (rc == 0)
653 /* Message timed out. */
654 rc = -ETIME;
655 }
656out_free:
657 free_page((unsigned long) ap_msg.message);
658 return rc;
659}
660
661/**
662 * The request distributor calls this function if it picked the PCIXCC/CEX2C
663 * device to handle a modexpo_crt request.
664 * @zdev: pointer to zcrypt_device structure that identifies the
665 * PCIXCC/CEX2C device to the request distributor
 666	 * @crt: pointer to the modexpo_crt request buffer
667 */
668static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
669 struct ica_rsa_modexpo_crt *crt)
670{
671 struct ap_message ap_msg;
672 struct response_type resp_type = {
673 .type = PCIXCC_RESPONSE_TYPE_ICA,
674 };
675 int rc;
676
677 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
678 if (!ap_msg.message)
679 return -ENOMEM;
680 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
681 atomic_inc_return(&zcrypt_step);
682 ap_msg.private = &resp_type;
683 rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
684 if (rc)
685 goto out_free;
686 init_completion(&resp_type.work);
687 ap_queue_message(zdev->ap_dev, &ap_msg);
688 rc = wait_for_completion_interruptible_timeout(
689 &resp_type.work, PCIXCC_CLEANUP_TIME);
690 if (rc > 0)
691 rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
692 crt->outputdatalength);
693 else {
694 /* Signal pending or message timed out. */
695 ap_cancel_message(zdev->ap_dev, &ap_msg);
696 if (rc == 0)
697 /* Message timed out. */
698 rc = -ETIME;
699 }
700out_free:
701 free_page((unsigned long) ap_msg.message);
702 return rc;
703}
704
705/**
706 * The request distributor calls this function if it picked the PCIXCC/CEX2C
707 * device to handle a send_cprb request.
708 * @zdev: pointer to zcrypt_device structure that identifies the
709 * PCIXCC/CEX2C device to the request distributor
710 * @xcRB: pointer to the send_cprb request buffer
711 */
712long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB)
713{
714 struct ap_message ap_msg;
715 struct response_type resp_type = {
716 .type = PCIXCC_RESPONSE_TYPE_XCRB,
717 };
718 int rc;
719
720 ap_msg.message = (void *) kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
721 if (!ap_msg.message)
722 return -ENOMEM;
723 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
724 atomic_inc_return(&zcrypt_step);
725 ap_msg.private = &resp_type;
726 rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
727 if (rc)
728 goto out_free;
729 init_completion(&resp_type.work);
730 ap_queue_message(zdev->ap_dev, &ap_msg);
731 rc = wait_for_completion_interruptible_timeout(
732 &resp_type.work, PCIXCC_CLEANUP_TIME);
733 if (rc > 0)
734 rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
735 else {
736 /* Signal pending or message timed out. */
737 ap_cancel_message(zdev->ap_dev, &ap_msg);
738 if (rc == 0)
739 /* Message timed out. */
740 rc = -ETIME;
741 }
742out_free:
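	/*
	 * Clear the request buffer before freeing it; it may still hold
	 * sensitive key material copied in from user space.
	 */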
743 memset(ap_msg.message, 0x0, ap_msg.length);
744 kfree(ap_msg.message);
745 return rc;
746}
747
748/**
749 * The crypto operations for a PCIXCC/CEX2C card.
750 */
751static struct zcrypt_ops zcrypt_pcixcc_ops = {
752 .rsa_modexpo = zcrypt_pcixcc_modexpo,
753 .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
754 .send_cprb = zcrypt_pcixcc_send_cprb,
755};
756
757/**
 758 * Micro-code detection function. It sends a message to a pcixcc card
759 * to find out the microcode level.
760 * @ap_dev: pointer to the AP device.
761 */
762static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
763{
764 static unsigned char msg[] = {
765 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
766 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
767 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
768 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
769 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
770 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
771 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
772 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
773 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
774 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
775 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
776 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
777 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
778 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
779 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
780 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
781 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
782 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
783 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
784 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
785 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
786 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
787 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
788 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
789 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
790 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
791 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
792 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
793 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
794 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
795 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
796 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
797 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
798 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
799 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
800 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
801 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
802 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
803 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
804 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
805 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
806 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
807 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
808 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
809 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
810 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
811 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
812 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
813 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
814 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
815 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
816 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
817 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
818 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
819 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
820 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
821 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
822 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
823 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
824 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
825 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
826 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
827 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
828 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
829 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
830 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
831 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
832 0xF1,0x3D,0x93,0x53
833 };
834 unsigned long long psmid;
835 struct CPRBX *cprbx;
836 char *reply;
837 int rc, i;
838
839 reply = (void *) get_zeroed_page(GFP_KERNEL);
840 if (!reply)
841 return -ENOMEM;
842
843 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
844 if (rc)
845 goto out_free;
846
847 /* Wait for the test message to complete. */
848 for (i = 0; i < 6; i++) {
849 mdelay(300);
850 rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
851 if (rc == 0 && psmid == 0x0102030405060708ULL)
852 break;
853 }
854
855 if (i >= 6) {
856 /* Got no answer. */
857 rc = -ENODEV;
858 goto out_free;
859 }
860
861 cprbx = (struct CPRBX *) (reply + 48);
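	/*
	 * A card still running MCL2 answers the test request with
	 * return/reason code 8/33; any other reply is treated as MCL3.
	 */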
862 if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
863 rc = ZCRYPT_PCIXCC_MCL2;
864 else
865 rc = ZCRYPT_PCIXCC_MCL3;
866out_free:
867 free_page((unsigned long) reply);
868 return rc;
869}
870
871/**
872 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
873 * since the bus_match already checked the hardware type. The PCIXCC
874 * cards come in two flavours: micro code level 2 and micro code level 3.
875 * This is checked by sending a test message to the device.
876 * @ap_dev: pointer to the AP device.
877 */
878static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
879{
880 struct zcrypt_device *zdev;
881 int rc;
882
883 zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
884 if (!zdev)
885 return -ENOMEM;
886 zdev->ap_dev = ap_dev;
887 zdev->ops = &zcrypt_pcixcc_ops;
888 zdev->online = 1;
889 if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
890 rc = zcrypt_pcixcc_mcl(ap_dev);
891 if (rc < 0) {
892 zcrypt_device_free(zdev);
893 return rc;
894 }
895 zdev->user_space_type = rc;
896 if (rc == ZCRYPT_PCIXCC_MCL2) {
897 zdev->type_string = "PCIXCC_MCL2";
898 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
899 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
900 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
901 } else {
902 zdev->type_string = "PCIXCC_MCL3";
903 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
904 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
905 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
906 }
907 } else {
908 zdev->user_space_type = ZCRYPT_CEX2C;
909 zdev->type_string = "CEX2C";
910 zdev->speed_rating = CEX2C_SPEED_RATING;
911 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
912 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
913 }
914 ap_dev->reply = &zdev->reply;
915 ap_dev->private = zdev;
916 rc = zcrypt_device_register(zdev);
917 if (rc)
918 goto out_free;
919 return 0;
920
921 out_free:
922 ap_dev->private = NULL;
923 zcrypt_device_free(zdev);
924 return rc;
925}
926
927/**
928 * This is called to remove the extended PCIXCC/CEX2C driver information
929 * if an AP device is removed.
930 */
931static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
932{
933 struct zcrypt_device *zdev = ap_dev->private;
934
935 zcrypt_device_unregister(zdev);
936}
937
938int __init zcrypt_pcixcc_init(void)
939{
940 return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
941}
942
943void zcrypt_pcixcc_exit(void)
944{
945 ap_driver_unregister(&zcrypt_pcixcc_driver);
946}
947
948#ifndef CONFIG_ZCRYPT_MONOLITHIC
949module_init(zcrypt_pcixcc_init);
950module_exit(zcrypt_pcixcc_exit);
951#endif
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
new file mode 100644
index 000000000000..a78ff307fd19
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -0,0 +1,79 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcixcc.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCIXCC_H_
29#define _ZCRYPT_PCIXCC_H_
30
31/**
32 * CPRBX
33 * Note that all shorts and ints are big-endian.
34 * All pointer fields are 16 bytes long, and mean nothing.
35 *
36 * A request CPRB is followed by a request_parameter_block.
37 *
38 * The request (or reply) parameter block is organized thus:
39 * function code
40 * VUD block
41 * key block
42 */
43struct CPRBX {
44 unsigned short cprb_len; /* CPRB length 220 */
45 unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
46 unsigned char pad_000[3]; /* Alignment pad bytes */
47 unsigned char func_id[2]; /* function id 0x5432 */
48 unsigned char cprb_flags[4]; /* Flags */
49 unsigned int req_parml; /* request parameter buffer len */
50 unsigned int req_datal; /* request data buffer */
51 unsigned int rpl_msgbl; /* reply message block length */
52 unsigned int rpld_parml; /* replied parameter block len */
53 unsigned int rpl_datal; /* reply data block len */
54 unsigned int rpld_datal; /* replied data block len */
55 unsigned int req_extbl; /* request extension block len */
56 unsigned char pad_001[4]; /* reserved */
57 unsigned int rpld_extbl; /* replied extension block len */
58 unsigned char req_parmb[16]; /* request parm block 'address' */
59 unsigned char req_datab[16]; /* request data block 'address' */
60 unsigned char rpl_parmb[16]; /* reply parm block 'address' */
61 unsigned char rpl_datab[16]; /* reply data block 'address' */
62 unsigned char req_extb[16]; /* request extension block 'addr'*/
63 unsigned char rpl_extb[16]; /* reply extension block 'addres'*/
64 unsigned short ccp_rtcode; /* server return code */
65 unsigned short ccp_rscode; /* server reason code */
66 unsigned int mac_data_len; /* Mac Data Length */
67 unsigned char logon_id[8]; /* Logon Identifier */
68 unsigned char mac_value[8]; /* Mac Value */
69 unsigned char mac_content_flgs;/* Mac content flag byte */
70 unsigned char pad_002; /* Alignment */
71 unsigned short domain; /* Domain */
72 unsigned char pad_003[12]; /* Domain masks */
73 unsigned char pad_004[36]; /* reserved */
74} __attribute__((packed));
75
76int zcrypt_pcixcc_init(void);
77void zcrypt_pcixcc_exit(void);
78
79#endif /* _ZCRYPT_PCIXCC_H_ */
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5399c5d99b81..a914129a4da9 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -19,9 +19,6 @@
19 19
20#include "s390mach.h" 20#include "s390mach.h"
21 21
22#define DBG printk
23// #define DBG(args,...) do {} while (0);
24
25static struct semaphore m_sem; 22static struct semaphore m_sem;
26 23
27extern int css_process_crw(int, int); 24extern int css_process_crw(int, int);
@@ -83,11 +80,11 @@ repeat:
83 ccode = stcrw(&crw[chain]); 80 ccode = stcrw(&crw[chain]);
84 if (ccode != 0) 81 if (ccode != 0)
85 break; 82 break;
86 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " 83 printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
87 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 84 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
88 crw[chain].slct, crw[chain].oflw, crw[chain].chn, 85 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
89 crw[chain].rsc, crw[chain].anc, crw[chain].erc, 86 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
90 crw[chain].rsid); 87 crw[chain].rsid);
91 /* Check for overflows. */ 88 /* Check for overflows. */
92 if (crw[chain].oflw) { 89 if (crw[chain].oflw) {
93 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 90 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
@@ -117,8 +114,8 @@ repeat:
117 * reported to the common I/O layer. 114 * reported to the common I/O layer.
118 */ 115 */
119 if (crw[chain].slct) { 116 if (crw[chain].slct) {
120 DBG(KERN_INFO"solicited machine check for " 117 pr_debug("solicited machine check for "
121 "channel path %02X\n", crw[0].rsid); 118 "channel path %02X\n", crw[0].rsid);
122 break; 119 break;
123 } 120 }
124 switch (crw[0].erc) { 121 switch (crw[0].erc) {
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index adc9d8f2c28f..5d39b2df0cc4 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -189,6 +189,10 @@ struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
189 struct zfcp_fsf_req *request, *tmp; 189 struct zfcp_fsf_req *request, *tmp;
190 unsigned int i; 190 unsigned int i;
191 191
192 /* 0 is reserved as an invalid req_id */
193 if (req_id == 0)
194 return NULL;
195
192 i = req_id % REQUEST_LIST_SIZE; 196 i = req_id % REQUEST_LIST_SIZE;
193 197
194 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) 198 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
@@ -299,11 +303,45 @@ zfcp_init_device_configure(void)
299 return; 303 return;
300} 304}
301 305
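/*
 * Returns the smallest power of two that is >= size (0 for size 0);
 * used below as the alignment for the newly created slab caches.
 */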
306static int calc_alignment(int size)
307{
308 int align = 1;
309
310 if (!size)
311 return 0;
312
313 while ((size - align) > 0)
314 align <<= 1;
315
316 return align;
317}
318
302static int __init 319static int __init
303zfcp_module_init(void) 320zfcp_module_init(void)
304{ 321{
322 int retval = -ENOMEM;
323 int size, align;
324
325 size = sizeof(struct zfcp_fsf_req_qtcb);
326 align = calc_alignment(size);
327 zfcp_data.fsf_req_qtcb_cache =
328 kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL);
329 if (!zfcp_data.fsf_req_qtcb_cache)
330 goto out;
305 331
306 int retval = 0; 332 size = sizeof(struct fsf_status_read_buffer);
333 align = calc_alignment(size);
334 zfcp_data.sr_buffer_cache =
335 kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL);
336 if (!zfcp_data.sr_buffer_cache)
337 goto out_sr_cache;
338
339 size = sizeof(struct zfcp_gid_pn_data);
340 align = calc_alignment(size);
341 zfcp_data.gid_pn_cache =
342 kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL);
343 if (!zfcp_data.gid_pn_cache)
344 goto out_gid_cache;
307 345
308 atomic_set(&zfcp_data.loglevel, loglevel); 346 atomic_set(&zfcp_data.loglevel, loglevel);
309 347
@@ -313,15 +351,16 @@ zfcp_module_init(void)
313 /* initialize adapters to be removed list head */ 351 /* initialize adapters to be removed list head */
314 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh); 352 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
315 353
316 zfcp_transport_template = fc_attach_transport(&zfcp_transport_functions); 354 zfcp_data.scsi_transport_template =
317 if (!zfcp_transport_template) 355 fc_attach_transport(&zfcp_transport_functions);
318 return -ENODEV; 356 if (!zfcp_data.scsi_transport_template)
357 goto out_transport;
319 358
320 retval = misc_register(&zfcp_cfdc_misc); 359 retval = misc_register(&zfcp_cfdc_misc);
321 if (retval != 0) { 360 if (retval != 0) {
322 ZFCP_LOG_INFO("registration of misc device " 361 ZFCP_LOG_INFO("registration of misc device "
323 "zfcp_cfdc failed\n"); 362 "zfcp_cfdc failed\n");
324 goto out; 363 goto out_misc;
325 } 364 }
326 365
327 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n", 366 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
@@ -333,9 +372,6 @@ zfcp_module_init(void)
333 /* initialise configuration rw lock */ 372 /* initialise configuration rw lock */
334 rwlock_init(&zfcp_data.config_lock); 373 rwlock_init(&zfcp_data.config_lock);
335 374
336 /* save address of data structure managing the driver module */
337 zfcp_data.scsi_host_template.module = THIS_MODULE;
338
339 /* setup dynamic I/O */ 375 /* setup dynamic I/O */
340 retval = zfcp_ccw_register(); 376 retval = zfcp_ccw_register();
341 if (retval) { 377 if (retval) {
@@ -350,6 +386,14 @@ zfcp_module_init(void)
350 386
351 out_ccw_register: 387 out_ccw_register:
352 misc_deregister(&zfcp_cfdc_misc); 388 misc_deregister(&zfcp_cfdc_misc);
389 out_misc:
390 fc_release_transport(zfcp_data.scsi_transport_template);
391 out_transport:
392 kmem_cache_destroy(zfcp_data.gid_pn_cache);
393 out_gid_cache:
394 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
395 out_sr_cache:
396 kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
353 out: 397 out:
354 return retval; 398 return retval;
355} 399}
@@ -935,20 +979,20 @@ static int
935zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 979zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
936{ 980{
937 adapter->pool.fsf_req_erp = 981 adapter->pool.fsf_req_erp =
938 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR, 982 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
939 sizeof(struct zfcp_fsf_req_pool_element)); 983 zfcp_data.fsf_req_qtcb_cache);
940 if (!adapter->pool.fsf_req_erp) 984 if (!adapter->pool.fsf_req_erp)
941 return -ENOMEM; 985 return -ENOMEM;
942 986
943 adapter->pool.fsf_req_scsi = 987 adapter->pool.fsf_req_scsi =
944 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR, 988 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
945 sizeof(struct zfcp_fsf_req_pool_element)); 989 zfcp_data.fsf_req_qtcb_cache);
946 if (!adapter->pool.fsf_req_scsi) 990 if (!adapter->pool.fsf_req_scsi)
947 return -ENOMEM; 991 return -ENOMEM;
948 992
949 adapter->pool.fsf_req_abort = 993 adapter->pool.fsf_req_abort =
950 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR, 994 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
951 sizeof(struct zfcp_fsf_req_pool_element)); 995 zfcp_data.fsf_req_qtcb_cache);
952 if (!adapter->pool.fsf_req_abort) 996 if (!adapter->pool.fsf_req_abort)
953 return -ENOMEM; 997 return -ENOMEM;
954 998
@@ -959,14 +1003,14 @@ zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
959 return -ENOMEM; 1003 return -ENOMEM;
960 1004
961 adapter->pool.data_status_read = 1005 adapter->pool.data_status_read =
962 mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, 1006 mempool_create_slab_pool(ZFCP_POOL_STATUS_READ_NR,
963 sizeof(struct fsf_status_read_buffer)); 1007 zfcp_data.sr_buffer_cache);
964 if (!adapter->pool.data_status_read) 1008 if (!adapter->pool.data_status_read)
965 return -ENOMEM; 1009 return -ENOMEM;
966 1010
967 adapter->pool.data_gid_pn = 1011 adapter->pool.data_gid_pn =
968 mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR, 1012 mempool_create_slab_pool(ZFCP_POOL_DATA_GID_PN_NR,
969 sizeof(struct zfcp_gid_pn_data)); 1013 zfcp_data.gid_pn_cache);
970 if (!adapter->pool.data_gid_pn) 1014 if (!adapter->pool.data_gid_pn)
971 return -ENOMEM; 1015 return -ENOMEM;
972 1016
@@ -1091,9 +1135,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1091 /* initialize lock of associated request queue */ 1135 /* initialize lock of associated request queue */
1092 rwlock_init(&adapter->request_queue.queue_lock); 1136 rwlock_init(&adapter->request_queue.queue_lock);
1093 1137
1094 /* intitialise SCSI ER timer */
1095 init_timer(&adapter->scsi_er_timer);
1096
1097 /* mark adapter unusable as long as sysfs registration is not complete */ 1138 /* mark adapter unusable as long as sysfs registration is not complete */
1098 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 1139 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1099 1140
@@ -1609,7 +1650,6 @@ zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
1609 gid_pn->ct.handler = zfcp_ns_gid_pn_handler; 1650 gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
1610 gid_pn->ct.handler_data = (unsigned long) gid_pn; 1651 gid_pn->ct.handler_data = (unsigned long) gid_pn;
1611 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; 1652 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
1612 gid_pn->ct.timer = &erp_action->timer;
1613 gid_pn->port = erp_action->port; 1653 gid_pn->port = erp_action->port;
1614 1654
1615 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, 1655 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
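The zfcp_aux.c hunks above replace the kmalloc-backed emergency pools with slab-backed ones: zfcp_module_init() now creates three kmem_caches (fsf_req_qtcb_cache, sr_buffer_cache, gid_pn_cache), the new error path in the first hunk tears them down in reverse order, and zfcp_allocate_low_mem_buffers() builds its mempools on top of them with mempool_create_slab_pool(). A minimal sketch of that pattern with hypothetical names; it uses the current five-argument kmem_cache_create(), whereas the 2.6.18-era code used kmem_cache_t and passed an extra destructor argument:

#include <linux/slab.h>
#include <linux/mempool.h>

struct my_req  { int status; };
struct my_qtcb { char data[64]; };

struct my_req_qtcb {                    /* request and its QTCB in one slab object */
        struct my_req  req;
        struct my_qtcb qtcb;
};

static struct kmem_cache *my_cache;
static mempool_t *my_pool;

static int my_pools_init(void)
{
        my_cache = kmem_cache_create("my_req_qtcb",
                                     sizeof(struct my_req_qtcb), 0, 0, NULL);
        if (!my_cache)
                return -ENOMEM;

        /* keep a minimum of one object in reserve for the recovery path */
        my_pool = mempool_create_slab_pool(1, my_cache);
        if (!my_pool) {
                kmem_cache_destroy(my_cache);
                return -ENOMEM;
        }
        return 0;
}

Allocations then go through mempool_alloc(my_pool, GFP_ATOMIC) and are returned with mempool_free(ptr, my_pool).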
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index fdabadeaa9ee..81680efa1721 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -275,19 +275,6 @@ zfcp_ccw_register(void)
275} 275}
276 276
277/** 277/**
278 * zfcp_ccw_unregister - ccw unregister function
279 *
280 * Unregisters the driver from common i/o layer. Function will be called at
281 * module unload/system shutdown.
282 */
283void __exit
284zfcp_ccw_unregister(void)
285{
286 zfcp_sysfs_driver_remove_files(&zfcp_ccw_driver.driver);
287 ccw_driver_unregister(&zfcp_ccw_driver);
288}
289
290/**
291 * zfcp_ccw_shutdown - gets called on reboot/shutdown 278 * zfcp_ccw_shutdown - gets called on reboot/shutdown
292 * 279 *
293 * Makes sure that QDIO queues are down when the system gets stopped. 280 * Makes sure that QDIO queues are down when the system gets stopped.
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index c033145d0f19..0aa3b1ac76af 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -707,7 +707,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
707 struct zfcp_adapter *adapter, 707 struct zfcp_adapter *adapter,
708 struct scsi_cmnd *scsi_cmnd, 708 struct scsi_cmnd *scsi_cmnd,
709 struct zfcp_fsf_req *fsf_req, 709 struct zfcp_fsf_req *fsf_req,
710 struct zfcp_fsf_req *old_fsf_req) 710 unsigned long old_req_id)
711{ 711{
712 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 712 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
713 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 713 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
@@ -768,8 +768,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
768 rec->fsf_seqno = fsf_req->seq_no; 768 rec->fsf_seqno = fsf_req->seq_no;
769 rec->fsf_issued = fsf_req->issued; 769 rec->fsf_issued = fsf_req->issued;
770 } 770 }
771 rec->type.old_fsf_reqid = 771 rec->type.old_fsf_reqid = old_req_id;
772 (unsigned long) old_fsf_req;
773 } else { 772 } else {
774 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 773 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
775 dump->total_size = buflen; 774 dump->total_size = buflen;
@@ -794,17 +793,17 @@ zfcp_scsi_dbf_event_result(const char *tag, int level,
794 struct zfcp_fsf_req *fsf_req) 793 struct zfcp_fsf_req *fsf_req)
795{ 794{
796 _zfcp_scsi_dbf_event_common("rslt", tag, level, 795 _zfcp_scsi_dbf_event_common("rslt", tag, level,
797 adapter, scsi_cmnd, fsf_req, NULL); 796 adapter, scsi_cmnd, fsf_req, 0);
798} 797}
799 798
800inline void 799inline void
801zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
802 struct scsi_cmnd *scsi_cmnd, 801 struct scsi_cmnd *scsi_cmnd,
803 struct zfcp_fsf_req *new_fsf_req, 802 struct zfcp_fsf_req *new_fsf_req,
804 struct zfcp_fsf_req *old_fsf_req) 803 unsigned long old_req_id)
805{ 804{
806 _zfcp_scsi_dbf_event_common("abrt", tag, 1, 805 _zfcp_scsi_dbf_event_common("abrt", tag, 1,
807 adapter, scsi_cmnd, new_fsf_req, old_fsf_req); 806 adapter, scsi_cmnd, new_fsf_req, old_req_id);
808} 807}
809 808
810inline void 809inline void
@@ -814,7 +813,7 @@ zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
814 struct zfcp_adapter *adapter = unit->port->adapter; 813 struct zfcp_adapter *adapter = unit->port->adapter;
815 814
816 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", 815 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
817 tag, 1, adapter, scsi_cmnd, NULL, NULL); 816 tag, 1, adapter, scsi_cmnd, NULL, 0);
818} 817}
819 818
820static int 819static int
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 94d1b74db356..8f882690994d 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -19,7 +19,6 @@
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22
23#ifndef ZFCP_DEF_H 22#ifndef ZFCP_DEF_H
24#define ZFCP_DEF_H 23#define ZFCP_DEF_H
25 24
@@ -32,6 +31,10 @@
32#include <linux/blkdev.h> 31#include <linux/blkdev.h>
33#include <linux/delay.h> 32#include <linux/delay.h>
34#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/slab.h>
35#include <linux/mempool.h>
36#include <linux/syscalls.h>
37#include <linux/ioctl.h>
35#include <scsi/scsi.h> 38#include <scsi/scsi.h>
36#include <scsi/scsi_tcq.h> 39#include <scsi/scsi_tcq.h>
37#include <scsi/scsi_cmnd.h> 40#include <scsi/scsi_cmnd.h>
@@ -39,14 +42,11 @@
39#include <scsi/scsi_host.h> 42#include <scsi/scsi_host.h>
40#include <scsi/scsi_transport.h> 43#include <scsi/scsi_transport.h>
41#include <scsi/scsi_transport_fc.h> 44#include <scsi/scsi_transport_fc.h>
42#include "zfcp_fsf.h"
43#include <asm/ccwdev.h> 45#include <asm/ccwdev.h>
44#include <asm/qdio.h> 46#include <asm/qdio.h>
45#include <asm/debug.h> 47#include <asm/debug.h>
46#include <asm/ebcdic.h> 48#include <asm/ebcdic.h>
47#include <linux/mempool.h> 49#include "zfcp_fsf.h"
48#include <linux/syscalls.h>
49#include <linux/ioctl.h>
50 50
51 51
52/********************* GENERAL DEFINES *********************************/ 52/********************* GENERAL DEFINES *********************************/
@@ -137,7 +137,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
137#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 137#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
138 138
139/* timeout value for "default timer" for fsf requests */ 139/* timeout value for "default timer" for fsf requests */
140#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 140#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
141 141
142/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 142/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
143 143
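The one-line hunk above is more than cosmetic: with the trailing semicolon, ZFCP_FSF_REQUEST_TIMEOUT only worked where the macro happened to be an entire statement, and it could not be passed as an argument, which the new zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT) calls in zfcp_fsf.c require. A contrived illustration, not driver code:

#define BAD_TIMEOUT  (60 * HZ);         /* note the stray ';' */
#define GOOD_TIMEOUT (60 * HZ)

/* zfcp_fsf_start_timer(req, BAD_TIMEOUT);  expands to
 *   zfcp_fsf_start_timer(req, (60 * HZ););   -- syntax error
 * zfcp_fsf_start_timer(req, GOOD_TIMEOUT); compiles as intended. */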
@@ -543,7 +543,7 @@ do { \
543} while (0) 543} while (0)
544 544
545#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL 545#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
546# define ZFCP_LOG_NORMAL(fmt, args...) 546# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0)
547#else 547#else
548# define ZFCP_LOG_NORMAL(fmt, args...) \ 548# define ZFCP_LOG_NORMAL(fmt, args...) \
549do { \ 549do { \
@@ -553,7 +553,7 @@ do { \
553#endif 553#endif
554 554
555#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO 555#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
556# define ZFCP_LOG_INFO(fmt, args...) 556# define ZFCP_LOG_INFO(fmt, args...) do { } while (0)
557#else 557#else
558# define ZFCP_LOG_INFO(fmt, args...) \ 558# define ZFCP_LOG_INFO(fmt, args...) \
559do { \ 559do { \
@@ -563,14 +563,14 @@ do { \
563#endif 563#endif
564 564
565#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG 565#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
566# define ZFCP_LOG_DEBUG(fmt, args...) 566# define ZFCP_LOG_DEBUG(fmt, args...) do { } while (0)
567#else 567#else
568# define ZFCP_LOG_DEBUG(fmt, args...) \ 568# define ZFCP_LOG_DEBUG(fmt, args...) \
569 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args) 569 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
570#endif 570#endif
571 571
572#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE 572#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
573# define ZFCP_LOG_TRACE(fmt, args...) 573# define ZFCP_LOG_TRACE(fmt, args...) do { } while (0)
574#else 574#else
575# define ZFCP_LOG_TRACE(fmt, args...) \ 575# define ZFCP_LOG_TRACE(fmt, args...) \
576 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args) 576 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
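The four log-macro hunks above give the compiled-out variants a do { } while (0) body instead of an empty expansion. The empty form leaves only a bare semicolon wherever the macro is used, which draws empty-body warnings and makes if/else constructs depend on that stray token; the do/while idiom keeps every invocation a single real statement in both the compiled-in and compiled-out configurations. A contrived example, not taken from zfcp:

#define LOG_OFF(fmt, args...)
#define LOG_SAFE(fmt, args...)  do { } while (0)

void f(int err)
{
        if (err)
                LOG_OFF("failed\n");    /* expands to "if (err) ;" - empty body */

        if (err)
                LOG_SAFE("failed\n");   /* expands to a proper one-statement body */
}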
@@ -779,7 +779,6 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
779 * @handler_data: data passed to handler function 779 * @handler_data: data passed to handler function
780 * @pool: pointer to memory pool for ct request structure 780 * @pool: pointer to memory pool for ct request structure
781 * @timeout: FSF timeout for this request 781 * @timeout: FSF timeout for this request
782 * @timer: timer (e.g. for request initiated by erp)
783 * @completion: completion for synchronization purposes 782 * @completion: completion for synchronization purposes
784 * @status: used to pass error status to calling function 783 * @status: used to pass error status to calling function
785 */ 784 */
@@ -793,7 +792,6 @@ struct zfcp_send_ct {
793 unsigned long handler_data; 792 unsigned long handler_data;
794 mempool_t *pool; 793 mempool_t *pool;
795 int timeout; 794 int timeout;
796 struct timer_list *timer;
797 struct completion *completion; 795 struct completion *completion;
798 int status; 796 int status;
799}; 797};
@@ -821,7 +819,6 @@ typedef void (*zfcp_send_els_handler_t)(unsigned long);
821 * @resp_count: number of elements in response scatter-gather list 819 * @resp_count: number of elements in response scatter-gather list
822 * @handler: handler function (called for response to the request) 820 * @handler: handler function (called for response to the request)
823 * @handler_data: data passed to handler function 821 * @handler_data: data passed to handler function
824 * @timer: timer (e.g. for request initiated by erp)
825 * @completion: completion for synchronization purposes 822 * @completion: completion for synchronization purposes
826 * @ls_code: hex code of ELS command 823 * @ls_code: hex code of ELS command
827 * @status: used to pass error status to calling function 824 * @status: used to pass error status to calling function
@@ -836,7 +833,6 @@ struct zfcp_send_els {
836 unsigned int resp_count; 833 unsigned int resp_count;
837 zfcp_send_els_handler_t handler; 834 zfcp_send_els_handler_t handler;
838 unsigned long handler_data; 835 unsigned long handler_data;
839 struct timer_list *timer;
840 struct completion *completion; 836 struct completion *completion;
841 int ls_code; 837 int ls_code;
842 int status; 838 int status;
@@ -886,7 +882,6 @@ struct zfcp_adapter {
886 struct list_head port_remove_lh; /* head of ports to be 882 struct list_head port_remove_lh; /* head of ports to be
887 removed */ 883 removed */
888 u32 ports; /* number of remote ports */ 884 u32 ports; /* number of remote ports */
889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */
890 atomic_t reqs_active; /* # active FSF reqs */ 885 atomic_t reqs_active; /* # active FSF reqs */
891 unsigned long req_no; /* unique FSF req number */ 886 unsigned long req_no; /* unique FSF req number */
892 struct list_head *req_list; /* list of pending reqs */ 887 struct list_head *req_list; /* list of pending reqs */
@@ -1003,6 +998,7 @@ struct zfcp_fsf_req {
1003 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 998 struct fsf_qtcb *qtcb; /* address of associated QTCB */
1004 u32 seq_no; /* Sequence number of request */ 999 u32 seq_no; /* Sequence number of request */
1005 unsigned long data; /* private data of request */ 1000 unsigned long data; /* private data of request */
1001 struct timer_list timer; /* used for erp or scsi er */
1006 struct zfcp_erp_action *erp_action; /* used if this request is 1002 struct zfcp_erp_action *erp_action; /* used if this request is
1007 issued on behalf of erp */ 1003 issued on behalf of erp */
 1008 	mempool_t *pool; /* used if request was allocated 1004
@@ -1016,6 +1012,7 @@ typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
1016/* driver data */ 1012/* driver data */
1017struct zfcp_data { 1013struct zfcp_data {
1018 struct scsi_host_template scsi_host_template; 1014 struct scsi_host_template scsi_host_template;
1015 struct scsi_transport_template *scsi_transport_template;
1019 atomic_t status; /* Module status flags */ 1016 atomic_t status; /* Module status flags */
1020 struct list_head adapter_list_head; /* head of adapter list */ 1017 struct list_head adapter_list_head; /* head of adapter list */
1021 struct list_head adapter_remove_lh; /* head of adapters to be 1018 struct list_head adapter_remove_lh; /* head of adapters to be
@@ -1031,6 +1028,9 @@ struct zfcp_data {
1031 wwn_t init_wwpn; 1028 wwn_t init_wwpn;
1032 fcp_lun_t init_fcp_lun; 1029 fcp_lun_t init_fcp_lun;
1033 char *driver_version; 1030 char *driver_version;
1031 kmem_cache_t *fsf_req_qtcb_cache;
1032 kmem_cache_t *sr_buffer_cache;
1033 kmem_cache_t *gid_pn_cache;
1034}; 1034};
1035 1035
1036/** 1036/**
@@ -1051,7 +1051,7 @@ struct zfcp_sg_list {
1051#define ZFCP_POOL_DATA_GID_PN_NR 1 1051#define ZFCP_POOL_DATA_GID_PN_NR 1
1052 1052
1053/* struct used by memory pools for fsf_requests */ 1053/* struct used by memory pools for fsf_requests */
1054struct zfcp_fsf_req_pool_element { 1054struct zfcp_fsf_req_qtcb {
1055 struct zfcp_fsf_req fsf_req; 1055 struct zfcp_fsf_req fsf_req;
1056 struct fsf_qtcb qtcb; 1056 struct fsf_qtcb qtcb;
1057}; 1057};
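Taken together, the zfcp_def.h hunks move timeout handling into the request itself: the per-adapter scsi_er_timer and the timer pointers in zfcp_send_ct and zfcp_send_els disappear, struct zfcp_fsf_req gains an embedded struct timer_list, zfcp_data grows the three slab caches created in zfcp_module_init(), and the mempool element is renamed to zfcp_fsf_req_qtcb to match. A condensed sketch of the resulting layout, showing only the fields visible in the hunks above:

struct zfcp_fsf_req {
        /* ... */
        struct fsf_qtcb *qtcb;                  /* associated QTCB, if any         */
        struct timer_list timer;                /* per-request ERP / SCSI-eh timer */
        struct zfcp_erp_action *erp_action;
        mempool_t *pool;                        /* set when allocated from a pool  */
        /* ... */
};

struct zfcp_fsf_req_qtcb {                      /* one slab object per request+QTCB */
        struct zfcp_fsf_req fsf_req;
        struct fsf_qtcb qtcb;
};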
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7f60b6fdf724..862a411a4aa0 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,6 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); 64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); 65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); 66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
67static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
68static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); 67static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 68static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 69static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,6 +91,7 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); 91static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); 92static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
95 93
94static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
96static void zfcp_erp_action_dismiss_port(struct zfcp_port *); 95static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
97static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); 96static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
98static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); 97static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
@@ -111,64 +110,86 @@ static inline void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
111static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *); 110static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *);
112 111
113static void zfcp_erp_memwait_handler(unsigned long); 112static void zfcp_erp_memwait_handler(unsigned long);
114static void zfcp_erp_timeout_handler(unsigned long);
115static inline void zfcp_erp_timeout_init(struct zfcp_erp_action *);
116 113
117/** 114/**
118 * zfcp_fsf_request_timeout_handler - called if a request timed out 115 * zfcp_close_qdio - close qdio queues for an adapter
119 * @data: pointer to adapter for handler function
120 *
121 * This function needs to be called if requests (ELS, Generic Service,
122 * or SCSI commands) exceed a certain time limit. The assumption is
123 * that after the time limit the adapter get stuck. So we trigger a reopen of
124 * the adapter. This should not be used for error recovery, SCSI abort
125 * commands and SCSI requests from SCSI mid-layer.
126 */ 116 */
127void 117static void zfcp_close_qdio(struct zfcp_adapter *adapter)
128zfcp_fsf_request_timeout_handler(unsigned long data)
129{ 118{
130 struct zfcp_adapter *adapter; 119 struct zfcp_qdio_queue *req_queue;
120 int first, count;
131 121
132 adapter = (struct zfcp_adapter *) data; 122 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
123 return;
133 124
134 zfcp_erp_adapter_reopen(adapter, 0); 125 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
126 req_queue = &adapter->request_queue;
127 write_lock_irq(&req_queue->queue_lock);
128 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
129 write_unlock_irq(&req_queue->queue_lock);
130
131 debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
132 while (qdio_shutdown(adapter->ccw_device,
133 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
134 msleep(1000);
135 debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
136
137 /* cleanup used outbound sbals */
138 count = atomic_read(&req_queue->free_count);
139 if (count < QDIO_MAX_BUFFERS_PER_Q) {
140 first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q;
141 count = QDIO_MAX_BUFFERS_PER_Q - count;
142 zfcp_qdio_zero_sbals(req_queue->buffer, first, count);
143 }
144 req_queue->free_index = 0;
145 atomic_set(&req_queue->free_count, 0);
146 req_queue->distance_from_int = 0;
147 adapter->response_queue.free_index = 0;
148 atomic_set(&adapter->response_queue.free_count, 0);
135} 149}
136 150
137/** 151/**
138 * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks 152 * zfcp_close_fsf - stop FSF operations for an adapter
139 * 153 *
140 * This function needs to be called whenever a SCSI error recovery 154 * Dismiss and cleanup all pending fsf_reqs (this wakes up all initiators of
141 * action (abort/reset) does not return. Re-opening the adapter means 155 * requests waiting for completion; especially this returns SCSI commands
142 * that the abort/reset command can be returned by zfcp. It won't complete 156 * with error state).
143 * via the adapter anymore (because qdio queues are closed). If ERP is
144 * already running on this adapter it will be stopped.
145 */ 157 */
146void zfcp_fsf_scsi_er_timeout_handler(unsigned long data) 158static void zfcp_close_fsf(struct zfcp_adapter *adapter)
147{ 159{
148 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 160 /* close queues to ensure that buffers are not accessed by adapter */
149 unsigned long flags; 161 zfcp_close_qdio(adapter);
150 162 zfcp_fsf_req_dismiss_all(adapter);
151 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " 163 /* reset FSF request sequence number */
152 "Restarting all operations on the adapter %s\n", 164 adapter->fsf_req_seq_no = 0;
153 zfcp_get_busid_by_adapter(adapter)); 165 /* all ports and units are closed */
154 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); 166 zfcp_erp_modify_adapter_status(adapter,
167 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
168}
155 169
156 write_lock_irqsave(&adapter->erp_lock, flags); 170/**
157 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 171 * zfcp_fsf_request_timeout_handler - called if a request timed out
158 &adapter->status)) { 172 * @data: pointer to adapter for handler function
159 zfcp_erp_modify_adapter_status(adapter, 173 *
160 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, 174 * This function needs to be called if requests (ELS, Generic Service,
161 ZFCP_CLEAR); 175 * or SCSI commands) exceed a certain time limit. The assumption is
 162 	zfcp_erp_action_dismiss_adapter(adapter); 176 * that after the time limit the adapter gets stuck. So we trigger a reopen of

163 write_unlock_irqrestore(&adapter->erp_lock, flags); 177 * the adapter.
164 /* dismiss all pending requests including requests for ERP */ 178 */
165 zfcp_fsf_req_dismiss_all(adapter); 179static void zfcp_fsf_request_timeout_handler(unsigned long data)
166 adapter->fsf_req_seq_no = 0; 180{
167 } else 181 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
168 write_unlock_irqrestore(&adapter->erp_lock, flags);
169 zfcp_erp_adapter_reopen(adapter, 0); 182 zfcp_erp_adapter_reopen(adapter, 0);
170} 183}
171 184
185void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
186{
187 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
188 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
189 fsf_req->timer.expires = timeout;
190 add_timer(&fsf_req->timer);
191}
192
172/* 193/*
173 * function: 194 * function:
174 * 195 *
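With the timer embedded in struct zfcp_fsf_req, zfcp_fsf_start_timer() above arms a per-request timeout, and the completion path (see the zfcp_fsf_req_complete() hunk in zfcp_fsf.c further down) deletes it once a response arrives, replacing the shared scsi_er_timer and the kmalloc'd timers of the old code. A minimal sketch of that arm/cancel pairing, written against the 2.6-era timer API used here (current kernels would use timer_setup() and from_timer() instead); the request type is abbreviated and hypothetical:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct req {
        struct timer_list timer;
        unsigned long adapter;          /* stand-in for struct zfcp_adapter * */
};

static void req_timeout(unsigned long data)
{
        /* woken after the deadline: reopen the adapter, as
         * zfcp_fsf_request_timeout_handler() above does */
}

static void start_req_timer(struct req *r, unsigned long timeout)
{
        init_timer(&r->timer);          /* safe before the first add_timer() */
        r->timer.function = req_timeout;
        r->timer.data = r->adapter;
        r->timer.expires = jiffies + timeout;
        add_timer(&r->timer);
}

static void complete_req(struct req *r)
{
        del_timer(&r->timer);           /* response arrived in time */
        /* ... evaluate protocol status ... */
}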
@@ -282,7 +303,6 @@ zfcp_erp_adisc(struct zfcp_port *port)
282 struct zfcp_ls_adisc *adisc; 303 struct zfcp_ls_adisc *adisc;
283 void *address = NULL; 304 void *address = NULL;
284 int retval = 0; 305 int retval = 0;
285 struct timer_list *timer;
286 306
287 send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC); 307 send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC);
288 if (send_els == NULL) 308 if (send_els == NULL)
@@ -329,22 +349,11 @@ zfcp_erp_adisc(struct zfcp_port *port)
329 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 349 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
330 adisc->nport_id); 350 adisc->nport_id);
331 351
332 timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC);
333 if (!timer)
334 goto nomem;
335
336 init_timer(timer);
337 timer->function = zfcp_fsf_request_timeout_handler;
338 timer->data = (unsigned long) adapter;
339 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
340 send_els->timer = timer;
341
342 retval = zfcp_fsf_send_els(send_els); 352 retval = zfcp_fsf_send_els(send_els);
343 if (retval != 0) { 353 if (retval != 0) {
344 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port " 354 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
345 "0x%08x on adapter %s\n", send_els->d_id, 355 "0x%08x on adapter %s\n", send_els->d_id,
346 zfcp_get_busid_by_adapter(adapter)); 356 zfcp_get_busid_by_adapter(adapter));
347 del_timer(send_els->timer);
348 goto freemem; 357 goto freemem;
349 } 358 }
350 359
@@ -356,7 +365,6 @@ zfcp_erp_adisc(struct zfcp_port *port)
356 if (address != NULL) 365 if (address != NULL)
357 __free_pages(send_els->req->page, 0); 366 __free_pages(send_els->req->page, 0);
358 if (send_els != NULL) { 367 if (send_els != NULL) {
359 kfree(send_els->timer);
360 kfree(send_els->req); 368 kfree(send_els->req);
361 kfree(send_els->resp); 369 kfree(send_els->resp);
362 kfree(send_els); 370 kfree(send_els);
@@ -382,9 +390,6 @@ zfcp_erp_adisc_handler(unsigned long data)
382 struct zfcp_ls_adisc_acc *adisc; 390 struct zfcp_ls_adisc_acc *adisc;
383 391
384 send_els = (struct zfcp_send_els *) data; 392 send_els = (struct zfcp_send_els *) data;
385
386 del_timer(send_els->timer);
387
388 adapter = send_els->adapter; 393 adapter = send_els->adapter;
389 port = send_els->port; 394 port = send_els->port;
390 d_id = send_els->d_id; 395 d_id = send_els->d_id;
@@ -433,7 +438,6 @@ zfcp_erp_adisc_handler(unsigned long data)
433 out: 438 out:
434 zfcp_port_put(port); 439 zfcp_port_put(port);
435 __free_pages(send_els->req->page, 0); 440 __free_pages(send_els->req->page, 0);
436 kfree(send_els->timer);
437 kfree(send_els->req); 441 kfree(send_els->req);
438 kfree(send_els->resp); 442 kfree(send_els->resp);
439 kfree(send_els); 443 kfree(send_els);
@@ -909,8 +913,6 @@ static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
909 debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex"); 913 debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex");
910 debug_event(adapter->erp_dbf, 2, &erp_action->action, 914 debug_event(adapter->erp_dbf, 2, &erp_action->action,
911 sizeof (int)); 915 sizeof (int));
912 if (!(set_mask & ZFCP_STATUS_ERP_TIMEDOUT))
913 del_timer(&erp_action->timer);
914 erp_action->status |= set_mask; 916 erp_action->status |= set_mask;
915 zfcp_erp_action_ready(erp_action); 917 zfcp_erp_action_ready(erp_action);
916 } else { 918 } else {
@@ -957,8 +959,7 @@ zfcp_erp_memwait_handler(unsigned long data)
957 * action gets an appropriate flag and will be processed 959 * action gets an appropriate flag and will be processed
958 * accordingly 960 * accordingly
959 */ 961 */
960static void 962void zfcp_erp_timeout_handler(unsigned long data)
961zfcp_erp_timeout_handler(unsigned long data)
962{ 963{
963 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; 964 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
964 struct zfcp_adapter *adapter = erp_action->adapter; 965 struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1934,8 +1935,7 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
1934 &erp_action->adapter->status); 1935 &erp_action->adapter->status);
1935 1936
1936 failed_openfcp: 1937 failed_openfcp:
1937 zfcp_erp_adapter_strategy_close_qdio(erp_action); 1938 zfcp_close_fsf(erp_action->adapter);
1938 zfcp_erp_adapter_strategy_close_fsf(erp_action);
1939 failed_qdio: 1939 failed_qdio:
1940 out: 1940 out:
1941 return retval; 1941 return retval;
@@ -2040,59 +2040,6 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2040 return retval; 2040 return retval;
2041} 2041}
2042 2042
2043/**
2044 * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
2045 */
2046static void
2047zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2048{
2049 int first_used;
2050 int used_count;
2051 struct zfcp_adapter *adapter = erp_action->adapter;
2052
2053 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
2054 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
2055 "queues on adapter %s\n",
2056 zfcp_get_busid_by_adapter(adapter));
2057 return;
2058 }
2059
2060 /*
2061 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
2062 * do_QDIO won't be called while qdio_shutdown is in progress.
2063 */
2064 write_lock_irq(&adapter->request_queue.queue_lock);
2065 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2066 write_unlock_irq(&adapter->request_queue.queue_lock);
2067
2068 debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
2069 while (qdio_shutdown(adapter->ccw_device,
2070 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
2071 msleep(1000);
2072 debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
2073
2074 /*
2075 * First we had to stop QDIO operation.
2076 * Now it is safe to take the following actions.
2077 */
2078
2079 /* Cleanup only necessary when there are unacknowledged buffers */
2080 if (atomic_read(&adapter->request_queue.free_count)
2081 < QDIO_MAX_BUFFERS_PER_Q) {
2082 first_used = (adapter->request_queue.free_index +
2083 atomic_read(&adapter->request_queue.free_count))
2084 % QDIO_MAX_BUFFERS_PER_Q;
2085 used_count = QDIO_MAX_BUFFERS_PER_Q -
2086 atomic_read(&adapter->request_queue.free_count);
2087 zfcp_qdio_zero_sbals(adapter->request_queue.buffer,
2088 first_used, used_count);
2089 }
2090 adapter->response_queue.free_index = 0;
2091 atomic_set(&adapter->response_queue.free_count, 0);
2092 adapter->request_queue.free_index = 0;
2093 atomic_set(&adapter->request_queue.free_count, 0);
2094 adapter->request_queue.distance_from_int = 0;
2095}
2096 2043
2097static int 2044static int
2098zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) 2045zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
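The zfcp_erp_adapter_strategy_close_qdio() deleted above lives on as zfcp_close_qdio() in the first zfcp_erp.c hunk; the one subtle part it keeps is reclaiming outbound SBALs that were handed to the hardware but never acknowledged, before the queue indices are reset. A worked example of that index arithmetic, assuming QDIO_MAX_BUFFERS_PER_Q is 128 as defined in the s390 qdio headers:

/* say free_index = 120 and free_count = 100, i.e. 28 buffers outstanding */
first = (120 + 100) % 128;      /* = 92: first buffer still owned by the hw */
count = 128 - 100;              /* = 28: buffers 92..119 need to be zeroed  */
zfcp_qdio_zero_sbals(req_queue->buffer, first, count);
/* afterwards free_index and free_count are reset to describe an empty queue */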
@@ -2127,7 +2074,6 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2127 write_lock_irq(&adapter->erp_lock); 2074 write_lock_irq(&adapter->erp_lock);
2128 zfcp_erp_action_to_running(erp_action); 2075 zfcp_erp_action_to_running(erp_action);
2129 write_unlock_irq(&adapter->erp_lock); 2076 write_unlock_irq(&adapter->erp_lock);
2130 zfcp_erp_timeout_init(erp_action);
2131 if (zfcp_fsf_exchange_config_data(erp_action)) { 2077 if (zfcp_fsf_exchange_config_data(erp_action)) {
2132 retval = ZFCP_ERP_FAILED; 2078 retval = ZFCP_ERP_FAILED;
2133 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf"); 2079 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
@@ -2196,7 +2142,6 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2196 zfcp_erp_action_to_running(erp_action); 2142 zfcp_erp_action_to_running(erp_action);
2197 write_unlock_irq(&adapter->erp_lock); 2143 write_unlock_irq(&adapter->erp_lock);
2198 2144
2199 zfcp_erp_timeout_init(erp_action);
2200 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2145 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2201 if (ret == -EOPNOTSUPP) { 2146 if (ret == -EOPNOTSUPP) {
2202 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); 2147 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
@@ -2248,27 +2193,6 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2248 return retval; 2193 return retval;
2249} 2194}
2250 2195
2251/**
2252 * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
2253 */
2254static void
2255zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2256{
2257 struct zfcp_adapter *adapter = erp_action->adapter;
2258
2259 /*
2260 * wake waiting initiators of requests,
2261 * return SCSI commands (with error status),
2262 * clean up all requests (synchronously)
2263 */
2264 zfcp_fsf_req_dismiss_all(adapter);
2265 /* reset FSF request sequence number */
2266 adapter->fsf_req_seq_no = 0;
2267 /* all ports and units are closed */
2268 zfcp_erp_modify_adapter_status(adapter,
2269 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
2270}
2271
2272/* 2196/*
2273 * function: 2197 * function:
2274 * 2198 *
@@ -2605,7 +2529,6 @@ zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
2605 struct zfcp_adapter *adapter = erp_action->adapter; 2529 struct zfcp_adapter *adapter = erp_action->adapter;
2606 struct zfcp_port *port = erp_action->port; 2530 struct zfcp_port *port = erp_action->port;
2607 2531
2608 zfcp_erp_timeout_init(erp_action);
2609 retval = zfcp_fsf_close_physical_port(erp_action); 2532 retval = zfcp_fsf_close_physical_port(erp_action);
2610 if (retval == -ENOMEM) { 2533 if (retval == -ENOMEM) {
2611 debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem"); 2534 debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem");
@@ -2662,7 +2585,6 @@ zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
2662 struct zfcp_adapter *adapter = erp_action->adapter; 2585 struct zfcp_adapter *adapter = erp_action->adapter;
2663 struct zfcp_port *port = erp_action->port; 2586 struct zfcp_port *port = erp_action->port;
2664 2587
2665 zfcp_erp_timeout_init(erp_action);
2666 retval = zfcp_fsf_close_port(erp_action); 2588 retval = zfcp_fsf_close_port(erp_action);
2667 if (retval == -ENOMEM) { 2589 if (retval == -ENOMEM) {
2668 debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem"); 2590 debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem");
@@ -2700,7 +2622,6 @@ zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
2700 struct zfcp_adapter *adapter = erp_action->adapter; 2622 struct zfcp_adapter *adapter = erp_action->adapter;
2701 struct zfcp_port *port = erp_action->port; 2623 struct zfcp_port *port = erp_action->port;
2702 2624
2703 zfcp_erp_timeout_init(erp_action);
2704 retval = zfcp_fsf_open_port(erp_action); 2625 retval = zfcp_fsf_open_port(erp_action);
2705 if (retval == -ENOMEM) { 2626 if (retval == -ENOMEM) {
2706 debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem"); 2627 debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem");
@@ -2738,7 +2659,6 @@ zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
2738 struct zfcp_adapter *adapter = erp_action->adapter; 2659 struct zfcp_adapter *adapter = erp_action->adapter;
2739 struct zfcp_port *port = erp_action->port; 2660 struct zfcp_port *port = erp_action->port;
2740 2661
2741 zfcp_erp_timeout_init(erp_action);
2742 retval = zfcp_ns_gid_pn_request(erp_action); 2662 retval = zfcp_ns_gid_pn_request(erp_action);
2743 if (retval == -ENOMEM) { 2663 if (retval == -ENOMEM) {
2744 debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem"); 2664 debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem");
@@ -2864,7 +2784,6 @@ zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
2864 struct zfcp_adapter *adapter = erp_action->adapter; 2784 struct zfcp_adapter *adapter = erp_action->adapter;
2865 struct zfcp_unit *unit = erp_action->unit; 2785 struct zfcp_unit *unit = erp_action->unit;
2866 2786
2867 zfcp_erp_timeout_init(erp_action);
2868 retval = zfcp_fsf_close_unit(erp_action); 2787 retval = zfcp_fsf_close_unit(erp_action);
2869 if (retval == -ENOMEM) { 2788 if (retval == -ENOMEM) {
2870 debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem"); 2789 debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem");
@@ -2905,7 +2824,6 @@ zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2905 struct zfcp_adapter *adapter = erp_action->adapter; 2824 struct zfcp_adapter *adapter = erp_action->adapter;
2906 struct zfcp_unit *unit = erp_action->unit; 2825 struct zfcp_unit *unit = erp_action->unit;
2907 2826
2908 zfcp_erp_timeout_init(erp_action);
2909 retval = zfcp_fsf_open_unit(erp_action); 2827 retval = zfcp_fsf_open_unit(erp_action);
2910 if (retval == -ENOMEM) { 2828 if (retval == -ENOMEM) {
2911 debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem"); 2829 debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem");
@@ -2930,14 +2848,13 @@ zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2930 return retval; 2848 return retval;
2931} 2849}
2932 2850
2933static inline void 2851void zfcp_erp_start_timer(struct zfcp_fsf_req *fsf_req)
2934zfcp_erp_timeout_init(struct zfcp_erp_action *erp_action)
2935{ 2852{
2936 init_timer(&erp_action->timer); 2853 BUG_ON(!fsf_req->erp_action);
2937 erp_action->timer.function = zfcp_erp_timeout_handler; 2854 fsf_req->timer.function = zfcp_erp_timeout_handler;
2938 erp_action->timer.data = (unsigned long) erp_action; 2855 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
2939 /* jiffies will be added in zfcp_fsf_req_send */ 2856 fsf_req->timer.expires = jiffies + ZFCP_ERP_FSFREQ_TIMEOUT;
2940 erp_action->timer.expires = ZFCP_ERP_FSFREQ_TIMEOUT; 2857 add_timer(&fsf_req->timer);
2941} 2858}
2942 2859
2943/* 2860/*
@@ -3241,7 +3158,7 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3241} 3158}
3242 3159
3243 3160
3244void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 3161static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3245{ 3162{
3246 struct zfcp_port *port; 3163 struct zfcp_port *port;
3247 3164
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 146d7a2b4c4a..b8794d77285d 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -55,7 +55,6 @@ extern void zfcp_unit_dequeue(struct zfcp_unit *);
55 55
56/******************************* S/390 IO ************************************/ 56/******************************* S/390 IO ************************************/
57extern int zfcp_ccw_register(void); 57extern int zfcp_ccw_register(void);
58extern void zfcp_ccw_unregister(void);
59 58
60extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); 59extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int);
61extern int zfcp_qdio_allocate(struct zfcp_adapter *); 60extern int zfcp_qdio_allocate(struct zfcp_adapter *);
@@ -88,8 +87,8 @@ extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *,
88 struct fsf_qtcb_bottom_port *); 87 struct fsf_qtcb_bottom_port *);
89extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **, 88extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
90 u32, u32, struct zfcp_sg_list *); 89 u32, u32, struct zfcp_sg_list *);
91extern void zfcp_fsf_request_timeout_handler(unsigned long); 90extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long);
92extern void zfcp_fsf_scsi_er_timeout_handler(unsigned long); 91extern void zfcp_erp_start_timer(struct zfcp_fsf_req *);
93extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); 92extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
94extern int zfcp_fsf_status_read(struct zfcp_adapter *, int); 93extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
95extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *, 94extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
@@ -99,8 +98,7 @@ extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
99extern int zfcp_fsf_send_els(struct zfcp_send_els *); 98extern int zfcp_fsf_send_els(struct zfcp_send_els *);
100extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, 99extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
101 struct zfcp_unit *, 100 struct zfcp_unit *,
102 struct scsi_cmnd *, 101 struct scsi_cmnd *, int, int);
103 struct timer_list*, int);
104extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *); 102extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
105extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *); 103extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
106extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 104extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
@@ -124,14 +122,11 @@ extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
124extern void set_host_byte(u32 *, char); 122extern void set_host_byte(u32 *, char);
125extern void set_driver_byte(u32 *, char); 123extern void set_driver_byte(u32 *, char);
126extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
127extern void zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *);
128extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); 125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
129 126
130extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *, 127extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
131 struct scsi_cmnd *, struct timer_list *); 128 struct scsi_cmnd *, int);
132extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, 129extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int);
133 struct timer_list *);
134extern struct scsi_transport_template *zfcp_transport_template;
135extern struct fc_function_template zfcp_transport_functions; 130extern struct fc_function_template zfcp_transport_functions;
136 131
137/******************************** ERP ****************************************/ 132/******************************** ERP ****************************************/
@@ -139,7 +134,6 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
139extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); 134extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
140extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); 135extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
141extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); 136extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
142extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
143 137
144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); 138extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
145extern int zfcp_erp_port_reopen(struct zfcp_port *, int); 139extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -187,7 +181,7 @@ extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
187 struct zfcp_fsf_req *); 181 struct zfcp_fsf_req *);
188extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, 182extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
189 struct scsi_cmnd *, struct zfcp_fsf_req *, 183 struct scsi_cmnd *, struct zfcp_fsf_req *,
190 struct zfcp_fsf_req *); 184 unsigned long);
191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 185extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
192 struct scsi_cmnd *); 186 struct scsi_cmnd *);
193extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *); 187extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ff2eacf5ec8c..277826cdd0c8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -42,7 +42,7 @@ static inline int zfcp_fsf_req_sbal_check(
42static inline int zfcp_use_one_sbal( 42static inline int zfcp_use_one_sbal(
43 struct scatterlist *, int, struct scatterlist *, int); 43 struct scatterlist *, int, struct scatterlist *, int);
44static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int); 44static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int);
45static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *); 45static int zfcp_fsf_req_send(struct zfcp_fsf_req *);
46static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); 46static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
47static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); 47static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
48static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); 48static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
@@ -100,14 +100,19 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
100 if (req_flags & ZFCP_REQ_NO_QTCB) 100 if (req_flags & ZFCP_REQ_NO_QTCB)
101 size = sizeof(struct zfcp_fsf_req); 101 size = sizeof(struct zfcp_fsf_req);
102 else 102 else
103 size = sizeof(struct zfcp_fsf_req_pool_element); 103 size = sizeof(struct zfcp_fsf_req_qtcb);
104 104
105 if (likely(pool != NULL)) 105 if (likely(pool))
106 ptr = mempool_alloc(pool, GFP_ATOMIC); 106 ptr = mempool_alloc(pool, GFP_ATOMIC);
107 else 107 else {
108 ptr = kmalloc(size, GFP_ATOMIC); 108 if (req_flags & ZFCP_REQ_NO_QTCB)
109 ptr = kmalloc(size, GFP_ATOMIC);
110 else
111 ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
112 SLAB_ATOMIC);
113 }
109 114
110 if (unlikely(NULL == ptr)) 115 if (unlikely(!ptr))
111 goto out; 116 goto out;
112 117
113 memset(ptr, 0, size); 118 memset(ptr, 0, size);
@@ -115,9 +120,8 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
115 if (req_flags & ZFCP_REQ_NO_QTCB) { 120 if (req_flags & ZFCP_REQ_NO_QTCB) {
116 fsf_req = (struct zfcp_fsf_req *) ptr; 121 fsf_req = (struct zfcp_fsf_req *) ptr;
117 } else { 122 } else {
118 fsf_req = &((struct zfcp_fsf_req_pool_element *) ptr)->fsf_req; 123 fsf_req = &((struct zfcp_fsf_req_qtcb *) ptr)->fsf_req;
119 fsf_req->qtcb = 124 fsf_req->qtcb = &((struct zfcp_fsf_req_qtcb *) ptr)->qtcb;
120 &((struct zfcp_fsf_req_pool_element *) ptr)->qtcb;
121 } 125 }
122 126
123 fsf_req->pool = pool; 127 fsf_req->pool = pool;
@@ -139,10 +143,17 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
139void 143void
140zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) 144zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
141{ 145{
142 if (likely(fsf_req->pool != NULL)) 146 if (likely(fsf_req->pool)) {
143 mempool_free(fsf_req, fsf_req->pool); 147 mempool_free(fsf_req, fsf_req->pool);
144 else 148 return;
145 kfree(fsf_req); 149 }
150
151 if (fsf_req->qtcb) {
152 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req);
153 return;
154 }
155
156 kfree(fsf_req);
146} 157}
147 158
148/** 159/**
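The allocation and free hunks above now distinguish three origins for a request: one of the adapter's mempools, the fsf_req_qtcb slab cache (requests that carry a QTCB), or plain kmalloc() for ZFCP_REQ_NO_QTCB requests. Restated as a single if/else chain equivalent to the new zfcp_fsf_req_free() shown above (a paraphrase, not a copy of the driver code):

static void req_free(struct zfcp_fsf_req *fsf_req)
{
        if (fsf_req->pool)              /* allocated from an adapter mempool     */
                mempool_free(fsf_req, fsf_req->pool);
        else if (fsf_req->qtcb)         /* slab object holding request plus QTCB */
                kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req);
        else                            /* ZFCP_REQ_NO_QTCB: plain kmalloc()     */
                kfree(fsf_req);
}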
@@ -214,8 +225,10 @@ zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
214 */ 225 */
215 zfcp_fsf_status_read_handler(fsf_req); 226 zfcp_fsf_status_read_handler(fsf_req);
216 goto out; 227 goto out;
217 } else 228 } else {
229 del_timer(&fsf_req->timer);
218 zfcp_fsf_protstatus_eval(fsf_req); 230 zfcp_fsf_protstatus_eval(fsf_req);
231 }
219 232
220 /* 233 /*
221 * fsf_req may be deleted due to waking up functions, so 234 * fsf_req may be deleted due to waking up functions, so
@@ -774,8 +787,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
774 sbale->addr = (void *) status_buffer; 787 sbale->addr = (void *) status_buffer;
775 sbale->length = sizeof(struct fsf_status_read_buffer); 788 sbale->length = sizeof(struct fsf_status_read_buffer);
776 789
777 /* start QDIO request for this FSF request */ 790 retval = zfcp_fsf_req_send(fsf_req);
778 retval = zfcp_fsf_req_send(fsf_req, NULL);
779 if (retval) { 791 if (retval) {
780 ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status " 792 ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status "
781 "environment.\n"); 793 "environment.\n");
@@ -1101,8 +1113,8 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1101 struct zfcp_unit *unit, int req_flags) 1113 struct zfcp_unit *unit, int req_flags)
1102{ 1114{
1103 volatile struct qdio_buffer_element *sbale; 1115 volatile struct qdio_buffer_element *sbale;
1104 unsigned long lock_flags;
1105 struct zfcp_fsf_req *fsf_req = NULL; 1116 struct zfcp_fsf_req *fsf_req = NULL;
1117 unsigned long lock_flags;
1106 int retval = 0; 1118 int retval = 0;
1107 1119
1108 /* setup new FSF request */ 1120 /* setup new FSF request */
@@ -1132,12 +1144,9 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1132 /* set handle of request which should be aborted */ 1144 /* set handle of request which should be aborted */
1133 fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id; 1145 fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id;
1134 1146
1135 /* start QDIO request for this FSF request */ 1147 zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
1136 1148 retval = zfcp_fsf_req_send(fsf_req);
1137 zfcp_fsf_start_scsi_er_timer(adapter);
1138 retval = zfcp_fsf_req_send(fsf_req, NULL);
1139 if (retval) { 1149 if (retval) {
1140 del_timer(&adapter->scsi_er_timer);
1141 ZFCP_LOG_INFO("error: Failed to send abort command request " 1150 ZFCP_LOG_INFO("error: Failed to send abort command request "
1142 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n", 1151 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
1143 zfcp_get_busid_by_adapter(adapter), 1152 zfcp_get_busid_by_adapter(adapter),
@@ -1173,8 +1182,6 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1173 unsigned char status_qual = 1182 unsigned char status_qual =
1174 new_fsf_req->qtcb->header.fsf_status_qual.word[0]; 1183 new_fsf_req->qtcb->header.fsf_status_qual.word[0];
1175 1184
1176 del_timer(&new_fsf_req->adapter->scsi_er_timer);
1177
1178 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1185 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1179 /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */ 1186 /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
1180 goto skip_fsfstatus; 1187 goto skip_fsfstatus;
@@ -1380,11 +1387,6 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1380 goto failed_req; 1387 goto failed_req;
1381 } 1388 }
1382 1389
1383 if (erp_action != NULL) {
1384 erp_action->fsf_req = fsf_req;
1385 fsf_req->erp_action = erp_action;
1386 }
1387
1388 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1390 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1389 if (zfcp_use_one_sbal(ct->req, ct->req_count, 1391 if (zfcp_use_one_sbal(ct->req, ct->req_count,
1390 ct->resp, ct->resp_count)){ 1392 ct->resp, ct->resp_count)){
@@ -1451,8 +1453,14 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1451 1453
1452 zfcp_san_dbf_event_ct_request(fsf_req); 1454 zfcp_san_dbf_event_ct_request(fsf_req);
1453 1455
1454 /* start QDIO request for this FSF request */ 1456 if (erp_action) {
1455 ret = zfcp_fsf_req_send(fsf_req, ct->timer); 1457 erp_action->fsf_req = fsf_req;
1458 fsf_req->erp_action = erp_action;
1459 zfcp_erp_start_timer(fsf_req);
1460 } else
1461 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
1462
1463 ret = zfcp_fsf_req_send(fsf_req);
1456 if (ret) { 1464 if (ret) {
1457 ZFCP_LOG_DEBUG("error: initiation of CT request failed " 1465 ZFCP_LOG_DEBUG("error: initiation of CT request failed "
1458 "(adapter %s, port 0x%016Lx)\n", 1466 "(adapter %s, port 0x%016Lx)\n",
@@ -1749,8 +1757,8 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1749 1757
1750 zfcp_san_dbf_event_els_request(fsf_req); 1758 zfcp_san_dbf_event_els_request(fsf_req);
1751 1759
1752 /* start QDIO request for this FSF request */ 1760 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
1753 ret = zfcp_fsf_req_send(fsf_req, els->timer); 1761 ret = zfcp_fsf_req_send(fsf_req);
1754 if (ret) { 1762 if (ret) {
1755 ZFCP_LOG_DEBUG("error: initiation of ELS request failed " 1763 ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
1756 "(adapter %s, port d_id: 0x%08x)\n", 1764 "(adapter %s, port d_id: 0x%08x)\n",
@@ -1947,6 +1955,7 @@ int
1947zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1955zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1948{ 1956{
1949 volatile struct qdio_buffer_element *sbale; 1957 volatile struct qdio_buffer_element *sbale;
1958 struct zfcp_fsf_req *fsf_req;
1950 unsigned long lock_flags; 1959 unsigned long lock_flags;
1951 int retval = 0; 1960 int retval = 0;
1952 1961
@@ -1955,7 +1964,7 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1955 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1964 FSF_QTCB_EXCHANGE_CONFIG_DATA,
1956 ZFCP_REQ_AUTO_CLEANUP, 1965 ZFCP_REQ_AUTO_CLEANUP,
1957 erp_action->adapter->pool.fsf_req_erp, 1966 erp_action->adapter->pool.fsf_req_erp,
1958 &lock_flags, &(erp_action->fsf_req)); 1967 &lock_flags, &fsf_req);
1959 if (retval < 0) { 1968 if (retval < 0) {
1960 ZFCP_LOG_INFO("error: Could not create exchange configuration " 1969 ZFCP_LOG_INFO("error: Could not create exchange configuration "
1961 "data request for adapter %s.\n", 1970 "data request for adapter %s.\n",
@@ -1963,26 +1972,26 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1963 goto out; 1972 goto out;
1964 } 1973 }
1965 1974
1966 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 1975 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1967 erp_action->fsf_req->sbal_curr, 0);
1968 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1976 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1969 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1977 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1970 1978
1971 erp_action->fsf_req->erp_action = erp_action; 1979 fsf_req->qtcb->bottom.config.feature_selection =
1972 erp_action->fsf_req->qtcb->bottom.config.feature_selection =
1973 FSF_FEATURE_CFDC | 1980 FSF_FEATURE_CFDC |
1974 FSF_FEATURE_LUN_SHARING | 1981 FSF_FEATURE_LUN_SHARING |
1975 FSF_FEATURE_NOTIFICATION_LOST | 1982 FSF_FEATURE_NOTIFICATION_LOST |
1976 FSF_FEATURE_UPDATE_ALERT; 1983 FSF_FEATURE_UPDATE_ALERT;
1984 fsf_req->erp_action = erp_action;
1985 erp_action->fsf_req = fsf_req;
1977 1986
1978 /* start QDIO request for this FSF request */ 1987 zfcp_erp_start_timer(fsf_req);
1979 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 1988 retval = zfcp_fsf_req_send(fsf_req);
1980 if (retval) { 1989 if (retval) {
1981 ZFCP_LOG_INFO 1990 ZFCP_LOG_INFO
1982 ("error: Could not send exchange configuration data " 1991 ("error: Could not send exchange configuration data "
1983 "command on the adapter %s\n", 1992 "command on the adapter %s\n",
1984 zfcp_get_busid_by_adapter(erp_action->adapter)); 1993 zfcp_get_busid_by_adapter(erp_action->adapter));
1985 zfcp_fsf_req_free(erp_action->fsf_req); 1994 zfcp_fsf_req_free(fsf_req);
1986 erp_action->fsf_req = NULL; 1995 erp_action->fsf_req = NULL;
1987 goto out; 1996 goto out;
1988 } 1997 }
@@ -2212,10 +2221,9 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2212 struct fsf_qtcb_bottom_port *data) 2221 struct fsf_qtcb_bottom_port *data)
2213{ 2222{
2214 volatile struct qdio_buffer_element *sbale; 2223 volatile struct qdio_buffer_element *sbale;
2215 int retval = 0;
2216 unsigned long lock_flags;
2217 struct zfcp_fsf_req *fsf_req; 2224 struct zfcp_fsf_req *fsf_req;
2218 struct timer_list *timer; 2225 unsigned long lock_flags;
2226 int retval = 0;
2219 2227
2220 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) { 2228 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2221 ZFCP_LOG_INFO("error: exchange port data " 2229 ZFCP_LOG_INFO("error: exchange port data "
@@ -2248,22 +2256,11 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2248 if (erp_action) { 2256 if (erp_action) {
2249 erp_action->fsf_req = fsf_req; 2257 erp_action->fsf_req = fsf_req;
2250 fsf_req->erp_action = erp_action; 2258 fsf_req->erp_action = erp_action;
2251 timer = &erp_action->timer; 2259 zfcp_erp_start_timer(fsf_req);
2252 } else { 2260 } else
2253 timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC); 2261 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
2254 if (!timer) {
2255 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2256 lock_flags);
2257 zfcp_fsf_req_free(fsf_req);
2258 return -ENOMEM;
2259 }
2260 init_timer(timer);
2261 timer->function = zfcp_fsf_request_timeout_handler;
2262 timer->data = (unsigned long) adapter;
2263 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
2264 }
2265 2262
2266 retval = zfcp_fsf_req_send(fsf_req, timer); 2263 retval = zfcp_fsf_req_send(fsf_req);
2267 if (retval) { 2264 if (retval) {
2268 ZFCP_LOG_INFO("error: Could not send an exchange port data " 2265 ZFCP_LOG_INFO("error: Could not send an exchange port data "
2269 "command on the adapter %s\n", 2266 "command on the adapter %s\n",
@@ -2271,8 +2268,6 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2271 zfcp_fsf_req_free(fsf_req); 2268 zfcp_fsf_req_free(fsf_req);
2272 if (erp_action) 2269 if (erp_action)
2273 erp_action->fsf_req = NULL; 2270 erp_action->fsf_req = NULL;
2274 else
2275 kfree(timer);
2276 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2271 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2277 lock_flags); 2272 lock_flags);
2278 return retval; 2273 return retval;
@@ -2283,9 +2278,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2283 if (!erp_action) { 2278 if (!erp_action) {
2284 wait_event(fsf_req->completion_wq, 2279 wait_event(fsf_req->completion_wq,
2285 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 2280 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2286 del_timer_sync(timer);
2287 zfcp_fsf_req_free(fsf_req); 2281 zfcp_fsf_req_free(fsf_req);
2288 kfree(timer);
2289 } 2282 }
2290 return retval; 2283 return retval;
2291} 2284}
@@ -2367,6 +2360,7 @@ int
2367zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 2360zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2368{ 2361{
2369 volatile struct qdio_buffer_element *sbale; 2362 volatile struct qdio_buffer_element *sbale;
2363 struct zfcp_fsf_req *fsf_req;
2370 unsigned long lock_flags; 2364 unsigned long lock_flags;
2371 int retval = 0; 2365 int retval = 0;
2372 2366
@@ -2375,7 +2369,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2375 FSF_QTCB_OPEN_PORT_WITH_DID, 2369 FSF_QTCB_OPEN_PORT_WITH_DID,
2376 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2370 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2377 erp_action->adapter->pool.fsf_req_erp, 2371 erp_action->adapter->pool.fsf_req_erp,
2378 &lock_flags, &(erp_action->fsf_req)); 2372 &lock_flags, &fsf_req);
2379 if (retval < 0) { 2373 if (retval < 0) {
2380 ZFCP_LOG_INFO("error: Could not create open port request " 2374 ZFCP_LOG_INFO("error: Could not create open port request "
2381 "for port 0x%016Lx on adapter %s.\n", 2375 "for port 0x%016Lx on adapter %s.\n",
@@ -2384,24 +2378,24 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2384 goto out; 2378 goto out;
2385 } 2379 }
2386 2380
2387 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2381 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2388 erp_action->fsf_req->sbal_curr, 0);
2389 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2382 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2390 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2383 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2391 2384
2392 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id; 2385 fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2393 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status); 2386 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2394 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2387 fsf_req->data = (unsigned long) erp_action->port;
2395 erp_action->fsf_req->erp_action = erp_action; 2388 fsf_req->erp_action = erp_action;
2389 erp_action->fsf_req = fsf_req;
2396 2390
2397 /* start QDIO request for this FSF request */ 2391 zfcp_erp_start_timer(fsf_req);
2398 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2392 retval = zfcp_fsf_req_send(fsf_req);
2399 if (retval) { 2393 if (retval) {
2400 ZFCP_LOG_INFO("error: Could not send open port request for " 2394 ZFCP_LOG_INFO("error: Could not send open port request for "
2401 "port 0x%016Lx on adapter %s.\n", 2395 "port 0x%016Lx on adapter %s.\n",
2402 erp_action->port->wwpn, 2396 erp_action->port->wwpn,
2403 zfcp_get_busid_by_adapter(erp_action->adapter)); 2397 zfcp_get_busid_by_adapter(erp_action->adapter));
2404 zfcp_fsf_req_free(erp_action->fsf_req); 2398 zfcp_fsf_req_free(fsf_req);
2405 erp_action->fsf_req = NULL; 2399 erp_action->fsf_req = NULL;
2406 goto out; 2400 goto out;
2407 } 2401 }
@@ -2623,6 +2617,7 @@ int
2623zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 2617zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2624{ 2618{
2625 volatile struct qdio_buffer_element *sbale; 2619 volatile struct qdio_buffer_element *sbale;
2620 struct zfcp_fsf_req *fsf_req;
2626 unsigned long lock_flags; 2621 unsigned long lock_flags;
2627 int retval = 0; 2622 int retval = 0;
2628 2623
@@ -2631,7 +2626,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2631 FSF_QTCB_CLOSE_PORT, 2626 FSF_QTCB_CLOSE_PORT,
2632 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2627 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2633 erp_action->adapter->pool.fsf_req_erp, 2628 erp_action->adapter->pool.fsf_req_erp,
2634 &lock_flags, &(erp_action->fsf_req)); 2629 &lock_flags, &fsf_req);
2635 if (retval < 0) { 2630 if (retval < 0) {
2636 ZFCP_LOG_INFO("error: Could not create a close port request " 2631 ZFCP_LOG_INFO("error: Could not create a close port request "
2637 "for port 0x%016Lx on adapter %s.\n", 2632 "for port 0x%016Lx on adapter %s.\n",
@@ -2640,25 +2635,25 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2640 goto out; 2635 goto out;
2641 } 2636 }
2642 2637
2643 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2638 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2644 erp_action->fsf_req->sbal_curr, 0);
2645 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2639 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2646 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2640 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2647 2641
2648 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); 2642 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
2649 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2643 fsf_req->data = (unsigned long) erp_action->port;
2650 erp_action->fsf_req->erp_action = erp_action; 2644 fsf_req->erp_action = erp_action;
2651 erp_action->fsf_req->qtcb->header.port_handle = 2645 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2652 erp_action->port->handle; 2646 fsf_req->erp_action = erp_action;
2653 2647 erp_action->fsf_req = fsf_req;
2654 /* start QDIO request for this FSF request */ 2648
2655 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2649 zfcp_erp_start_timer(fsf_req);
2650 retval = zfcp_fsf_req_send(fsf_req);
2656 if (retval) { 2651 if (retval) {
2657 ZFCP_LOG_INFO("error: Could not send a close port request for " 2652 ZFCP_LOG_INFO("error: Could not send a close port request for "
2658 "port 0x%016Lx on adapter %s.\n", 2653 "port 0x%016Lx on adapter %s.\n",
2659 erp_action->port->wwpn, 2654 erp_action->port->wwpn,
2660 zfcp_get_busid_by_adapter(erp_action->adapter)); 2655 zfcp_get_busid_by_adapter(erp_action->adapter));
2661 zfcp_fsf_req_free(erp_action->fsf_req); 2656 zfcp_fsf_req_free(fsf_req);
2662 erp_action->fsf_req = NULL; 2657 erp_action->fsf_req = NULL;
2663 goto out; 2658 goto out;
2664 } 2659 }
@@ -2755,16 +2750,17 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
2755int 2750int
2756zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 2751zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2757{ 2752{
2758 int retval = 0;
2759 unsigned long lock_flags;
2760 volatile struct qdio_buffer_element *sbale; 2753 volatile struct qdio_buffer_element *sbale;
2754 struct zfcp_fsf_req *fsf_req;
2755 unsigned long lock_flags;
2756 int retval = 0;
2761 2757
2762 /* setup new FSF request */ 2758 /* setup new FSF request */
2763 retval = zfcp_fsf_req_create(erp_action->adapter, 2759 retval = zfcp_fsf_req_create(erp_action->adapter,
2764 FSF_QTCB_CLOSE_PHYSICAL_PORT, 2760 FSF_QTCB_CLOSE_PHYSICAL_PORT,
2765 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2761 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2766 erp_action->adapter->pool.fsf_req_erp, 2762 erp_action->adapter->pool.fsf_req_erp,
2767 &lock_flags, &erp_action->fsf_req); 2763 &lock_flags, &fsf_req);
2768 if (retval < 0) { 2764 if (retval < 0) {
2769 ZFCP_LOG_INFO("error: Could not create close physical port " 2765 ZFCP_LOG_INFO("error: Could not create close physical port "
2770 "request (adapter %s, port 0x%016Lx)\n", 2766 "request (adapter %s, port 0x%016Lx)\n",
@@ -2774,8 +2770,7 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2774 goto out; 2770 goto out;
2775 } 2771 }
2776 2772
2777 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2773 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2778 erp_action->fsf_req->sbal_curr, 0);
2779 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2774 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2780 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2775 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2781 2776
@@ -2783,20 +2778,19 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2783 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, 2778 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
2784 &erp_action->port->status); 2779 &erp_action->port->status);
2785 /* save a pointer to this port */ 2780 /* save a pointer to this port */
2786 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2781 fsf_req->data = (unsigned long) erp_action->port;
2787 /* port to be closed */ 2782 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2788 erp_action->fsf_req->qtcb->header.port_handle = 2783 fsf_req->erp_action = erp_action;
2789 erp_action->port->handle; 2784 erp_action->fsf_req = fsf_req;
2790 erp_action->fsf_req->erp_action = erp_action; 2785
2791 2786 zfcp_erp_start_timer(fsf_req);
2792 /* start QDIO request for this FSF request */ 2787 retval = zfcp_fsf_req_send(fsf_req);
2793 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
2794 if (retval) { 2788 if (retval) {
2795 ZFCP_LOG_INFO("error: Could not send close physical port " 2789 ZFCP_LOG_INFO("error: Could not send close physical port "
2796 "request (adapter %s, port 0x%016Lx)\n", 2790 "request (adapter %s, port 0x%016Lx)\n",
2797 zfcp_get_busid_by_adapter(erp_action->adapter), 2791 zfcp_get_busid_by_adapter(erp_action->adapter),
2798 erp_action->port->wwpn); 2792 erp_action->port->wwpn);
2799 zfcp_fsf_req_free(erp_action->fsf_req); 2793 zfcp_fsf_req_free(fsf_req);
2800 erp_action->fsf_req = NULL; 2794 erp_action->fsf_req = NULL;
2801 goto out; 2795 goto out;
2802 } 2796 }
@@ -2961,6 +2955,7 @@ int
2961zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 2955zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2962{ 2956{
2963 volatile struct qdio_buffer_element *sbale; 2957 volatile struct qdio_buffer_element *sbale;
2958 struct zfcp_fsf_req *fsf_req;
2964 unsigned long lock_flags; 2959 unsigned long lock_flags;
2965 int retval = 0; 2960 int retval = 0;
2966 2961
@@ -2969,7 +2964,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2969 FSF_QTCB_OPEN_LUN, 2964 FSF_QTCB_OPEN_LUN,
2970 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2965 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2971 erp_action->adapter->pool.fsf_req_erp, 2966 erp_action->adapter->pool.fsf_req_erp,
2972 &lock_flags, &(erp_action->fsf_req)); 2967 &lock_flags, &fsf_req);
2973 if (retval < 0) { 2968 if (retval < 0) {
2974 ZFCP_LOG_INFO("error: Could not create open unit request for " 2969 ZFCP_LOG_INFO("error: Could not create open unit request for "
2975 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 2970 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
@@ -2979,24 +2974,22 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2979 goto out; 2974 goto out;
2980 } 2975 }
2981 2976
2982 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2977 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2983 erp_action->fsf_req->sbal_curr, 0);
2984 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2978 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2985 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2979 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2986 2980
2987 erp_action->fsf_req->qtcb->header.port_handle = 2981 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2988 erp_action->port->handle; 2982 fsf_req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
2989 erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
2990 erp_action->unit->fcp_lun;
2991 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 2983 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2992 erp_action->fsf_req->qtcb->bottom.support.option = 2984 fsf_req->qtcb->bottom.support.option =
2993 FSF_OPEN_LUN_SUPPRESS_BOXING; 2985 FSF_OPEN_LUN_SUPPRESS_BOXING;
2994 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); 2986 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
2995 erp_action->fsf_req->data = (unsigned long) erp_action->unit; 2987 fsf_req->data = (unsigned long) erp_action->unit;
2996 erp_action->fsf_req->erp_action = erp_action; 2988 fsf_req->erp_action = erp_action;
2989 erp_action->fsf_req = fsf_req;
2997 2990
2998 /* start QDIO request for this FSF request */ 2991 zfcp_erp_start_timer(fsf_req);
2999 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2992 retval = zfcp_fsf_req_send(erp_action->fsf_req);
3000 if (retval) { 2993 if (retval) {
3001 ZFCP_LOG_INFO("error: Could not send an open unit request " 2994 ZFCP_LOG_INFO("error: Could not send an open unit request "
3002 "on the adapter %s, port 0x%016Lx for " 2995 "on the adapter %s, port 0x%016Lx for "
@@ -3004,7 +2997,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
3004 zfcp_get_busid_by_adapter(erp_action->adapter), 2997 zfcp_get_busid_by_adapter(erp_action->adapter),
3005 erp_action->port->wwpn, 2998 erp_action->port->wwpn,
3006 erp_action->unit->fcp_lun); 2999 erp_action->unit->fcp_lun);
3007 zfcp_fsf_req_free(erp_action->fsf_req); 3000 zfcp_fsf_req_free(fsf_req);
3008 erp_action->fsf_req = NULL; 3001 erp_action->fsf_req = NULL;
3009 goto out; 3002 goto out;
3010 } 3003 }
@@ -3297,6 +3290,7 @@ int
3297zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 3290zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3298{ 3291{
3299 volatile struct qdio_buffer_element *sbale; 3292 volatile struct qdio_buffer_element *sbale;
3293 struct zfcp_fsf_req *fsf_req;
3300 unsigned long lock_flags; 3294 unsigned long lock_flags;
3301 int retval = 0; 3295 int retval = 0;
3302 3296
@@ -3305,7 +3299,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3305 FSF_QTCB_CLOSE_LUN, 3299 FSF_QTCB_CLOSE_LUN,
3306 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 3300 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
3307 erp_action->adapter->pool.fsf_req_erp, 3301 erp_action->adapter->pool.fsf_req_erp,
3308 &lock_flags, &(erp_action->fsf_req)); 3302 &lock_flags, &fsf_req);
3309 if (retval < 0) { 3303 if (retval < 0) {
3310 ZFCP_LOG_INFO("error: Could not create close unit request for " 3304 ZFCP_LOG_INFO("error: Could not create close unit request for "
3311 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 3305 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
@@ -3315,27 +3309,26 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3315 goto out; 3309 goto out;
3316 } 3310 }
3317 3311
3318 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 3312 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
3319 erp_action->fsf_req->sbal_curr, 0);
3320 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 3313 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
3321 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 3314 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3322 3315
3323 erp_action->fsf_req->qtcb->header.port_handle = 3316 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
3324 erp_action->port->handle; 3317 fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3325 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3326 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); 3318 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
3327 erp_action->fsf_req->data = (unsigned long) erp_action->unit; 3319 fsf_req->data = (unsigned long) erp_action->unit;
3328 erp_action->fsf_req->erp_action = erp_action; 3320 fsf_req->erp_action = erp_action;
3321 erp_action->fsf_req = fsf_req;
3329 3322
3330 /* start QDIO request for this FSF request */ 3323 zfcp_erp_start_timer(fsf_req);
3331 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 3324 retval = zfcp_fsf_req_send(erp_action->fsf_req);
3332 if (retval) { 3325 if (retval) {
3333 ZFCP_LOG_INFO("error: Could not send a close unit request for " 3326 ZFCP_LOG_INFO("error: Could not send a close unit request for "
3334 					"unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 3327 			      "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
3335 erp_action->unit->fcp_lun, 3328 erp_action->unit->fcp_lun,
3336 erp_action->port->wwpn, 3329 erp_action->port->wwpn,
3337 zfcp_get_busid_by_adapter(erp_action->adapter)); 3330 zfcp_get_busid_by_adapter(erp_action->adapter));
3338 zfcp_fsf_req_free(erp_action->fsf_req); 3331 zfcp_fsf_req_free(fsf_req);
3339 erp_action->fsf_req = NULL; 3332 erp_action->fsf_req = NULL;
3340 goto out; 3333 goto out;
3341 } 3334 }
@@ -3488,7 +3481,7 @@ int
3488zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, 3481zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3489 struct zfcp_unit *unit, 3482 struct zfcp_unit *unit,
3490 struct scsi_cmnd * scsi_cmnd, 3483 struct scsi_cmnd * scsi_cmnd,
3491 struct timer_list *timer, int req_flags) 3484 int use_timer, int req_flags)
3492{ 3485{
3493 struct zfcp_fsf_req *fsf_req = NULL; 3486 struct zfcp_fsf_req *fsf_req = NULL;
3494 struct fcp_cmnd_iu *fcp_cmnd_iu; 3487 struct fcp_cmnd_iu *fcp_cmnd_iu;
@@ -3516,7 +3509,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3516 fsf_req->unit = unit; 3509 fsf_req->unit = unit;
3517 3510
3518 /* associate FSF request with SCSI request (for look up on abort) */ 3511 /* associate FSF request with SCSI request (for look up on abort) */
3519 scsi_cmnd->host_scribble = (char *) fsf_req; 3512 scsi_cmnd->host_scribble = (unsigned char *) fsf_req->req_id;
3520 3513
3521 /* associate SCSI command with FSF request */ 3514 /* associate SCSI command with FSF request */
3522 fsf_req->data = (unsigned long) scsi_cmnd; 3515 fsf_req->data = (unsigned long) scsi_cmnd;
@@ -3629,11 +3622,10 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3629 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 3622 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3630 (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 3623 (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3631 3624
3632 /* 3625 if (use_timer)
3633 * start QDIO request for this FSF request 3626 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
3634 * covered by an SBALE) 3627
3635 */ 3628 retval = zfcp_fsf_req_send(fsf_req);
3636 retval = zfcp_fsf_req_send(fsf_req, timer);
3637 if (unlikely(retval < 0)) { 3629 if (unlikely(retval < 0)) {
3638 ZFCP_LOG_INFO("error: Could not send FCP command request " 3630 ZFCP_LOG_INFO("error: Could not send FCP command request "
3639 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n", 3631 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
@@ -3718,11 +3710,9 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
3718 fcp_cmnd_iu->fcp_lun = unit->fcp_lun; 3710 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
3719 fcp_cmnd_iu->task_management_flags = tm_flags; 3711 fcp_cmnd_iu->task_management_flags = tm_flags;
3720 3712
3721 /* start QDIO request for this FSF request */ 3713 zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
3722 zfcp_fsf_start_scsi_er_timer(adapter); 3714 retval = zfcp_fsf_req_send(fsf_req);
3723 retval = zfcp_fsf_req_send(fsf_req, NULL);
3724 if (retval) { 3715 if (retval) {
3725 del_timer(&adapter->scsi_er_timer);
3726 ZFCP_LOG_INFO("error: Could not send an FCP-command (task " 3716 ZFCP_LOG_INFO("error: Could not send an FCP-command (task "
3727 "management) on adapter %s, port 0x%016Lx for " 3717 "management) on adapter %s, port 0x%016Lx for "
3728 "unit LUN 0x%016Lx\n", 3718 "unit LUN 0x%016Lx\n",
@@ -4226,7 +4216,6 @@ zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
4226 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 4216 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
4227 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data; 4217 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
4228 4218
4229 del_timer(&fsf_req->adapter->scsi_er_timer);
4230 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 4219 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
4231 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 4220 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4232 goto skip_fsfstatus; 4221 goto skip_fsfstatus;
@@ -4295,7 +4284,6 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4295 struct zfcp_fsf_req *fsf_req; 4284 struct zfcp_fsf_req *fsf_req;
4296 struct fsf_qtcb_bottom_support *bottom; 4285 struct fsf_qtcb_bottom_support *bottom;
4297 volatile struct qdio_buffer_element *sbale; 4286 volatile struct qdio_buffer_element *sbale;
4298 struct timer_list *timer;
4299 unsigned long lock_flags; 4287 unsigned long lock_flags;
4300 int req_flags = 0; 4288 int req_flags = 0;
4301 int direction; 4289 int direction;
@@ -4327,12 +4315,6 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4327 goto out; 4315 goto out;
4328 } 4316 }
4329 4317
4330 timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
4331 if (!timer) {
4332 retval = -ENOMEM;
4333 goto out;
4334 }
4335
4336 retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags, 4318 retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags,
4337 NULL, &lock_flags, &fsf_req); 4319 NULL, &lock_flags, &fsf_req);
4338 if (retval < 0) { 4320 if (retval < 0) {
@@ -4367,12 +4349,8 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4367 } else 4349 } else
4368 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 4350 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
4369 4351
4370 init_timer(timer); 4352 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
4371 timer->function = zfcp_fsf_request_timeout_handler; 4353 retval = zfcp_fsf_req_send(fsf_req);
4372 timer->data = (unsigned long) adapter;
4373 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
4374
4375 retval = zfcp_fsf_req_send(fsf_req, timer);
4376 if (retval < 0) { 4354 if (retval < 0) {
4377 ZFCP_LOG_INFO("initiation of cfdc up/download failed" 4355 ZFCP_LOG_INFO("initiation of cfdc up/download failed"
4378 "(adapter %s)\n", 4356 "(adapter %s)\n",
@@ -4392,15 +4370,12 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4392 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 4370 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4393 4371
4394 *fsf_req_ptr = fsf_req; 4372 *fsf_req_ptr = fsf_req;
4395 del_timer_sync(timer); 4373 goto out;
4396 goto free_timer;
4397 4374
4398 free_fsf_req: 4375 free_fsf_req:
4399 zfcp_fsf_req_free(fsf_req); 4376 zfcp_fsf_req_free(fsf_req);
4400 unlock_queue_lock: 4377 unlock_queue_lock:
4401 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 4378 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
4402 free_timer:
4403 kfree(timer);
4404 out: 4379 out:
4405 return retval; 4380 return retval;
4406} 4381}
@@ -4656,7 +4631,6 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4656{ 4631{
4657 volatile struct qdio_buffer_element *sbale; 4632 volatile struct qdio_buffer_element *sbale;
4658 struct zfcp_fsf_req *fsf_req = NULL; 4633 struct zfcp_fsf_req *fsf_req = NULL;
4659 unsigned long flags;
4660 int ret = 0; 4634 int ret = 0;
4661 struct zfcp_qdio_queue *req_queue = &adapter->request_queue; 4635 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4662 4636
@@ -4673,12 +4647,13 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4673 fsf_req->fsf_command = fsf_cmd; 4647 fsf_req->fsf_command = fsf_cmd;
4674 INIT_LIST_HEAD(&fsf_req->list); 4648 INIT_LIST_HEAD(&fsf_req->list);
4675 4649
4676 	/* unique request id */ 4650 	/* this is serialized (we are holding req_queue-lock of adapter) */
4677 spin_lock_irqsave(&adapter->req_list_lock, flags); 4651 if (adapter->req_no == 0)
4652 adapter->req_no++;
4678 fsf_req->req_id = adapter->req_no++; 4653 fsf_req->req_id = adapter->req_no++;
4679 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
4680 4654
4681 zfcp_fsf_req_qtcb_init(fsf_req); 4655 init_timer(&fsf_req->timer);
4656 zfcp_fsf_req_qtcb_init(fsf_req);
4682 4657
4683 /* initialize waitqueue which may be used to wait on 4658 /* initialize waitqueue which may be used to wait on
4684 this request completion */ 4659 this request completion */
@@ -4748,8 +4723,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4748 * returns:	0 - request transfer successfully started 4723 * returns:	0 - request transfer successfully started
4749 * !0 - start of request transfer failed 4724 * !0 - start of request transfer failed
4750 */ 4725 */
4751static int 4726static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
4752zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4753{ 4727{
4754 struct zfcp_adapter *adapter; 4728 struct zfcp_adapter *adapter;
4755 struct zfcp_qdio_queue *req_queue; 4729 struct zfcp_qdio_queue *req_queue;
@@ -4777,12 +4751,6 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4777 4751
4778 inc_seq_no = (fsf_req->qtcb != NULL); 4752 inc_seq_no = (fsf_req->qtcb != NULL);
4779 4753
4780 /* figure out expiration time of timeout and start timeout */
4781 if (unlikely(timer)) {
4782 timer->expires += jiffies;
4783 add_timer(timer);
4784 }
4785
4786 ZFCP_LOG_TRACE("request queue of adapter %s: " 4754 ZFCP_LOG_TRACE("request queue of adapter %s: "
4787 "next free SBAL is %i, %i free SBALs\n", 4755 "next free SBAL is %i, %i free SBALs\n",
4788 zfcp_get_busid_by_adapter(adapter), 4756 zfcp_get_busid_by_adapter(adapter),
@@ -4819,12 +4787,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4819 if (unlikely(retval)) { 4787 if (unlikely(retval)) {
4820 /* Queues are down..... */ 4788 /* Queues are down..... */
4821 retval = -EIO; 4789 retval = -EIO;
4822 /* 4790 del_timer(&fsf_req->timer);
4823 * FIXME(potential race):
4824 * timer might be expired (absolutely unlikely)
4825 */
4826 if (timer)
4827 del_timer(timer);
4828 spin_lock(&adapter->req_list_lock); 4791 spin_lock(&adapter->req_list_lock);
4829 zfcp_reqlist_remove(adapter, fsf_req->req_id); 4792 zfcp_reqlist_remove(adapter, fsf_req->req_id);
4830 spin_unlock(&adapter->req_list_lock); 4793 spin_unlock(&adapter->req_list_lock);
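
The zfcp_fsf.c hunks above converge on one pattern: the timer_list that callers used to allocate and pass into zfcp_fsf_req_send() is replaced by a timer embedded in struct zfcp_fsf_req, armed through zfcp_fsf_start_timer()/zfcp_erp_start_timer() just before the request is sent. A minimal sketch of what such a start helper amounts to, assuming only what the hunks show (the timer is prepared with init_timer() in zfcp_fsf_req_create(), and zfcp_fsf_request_timeout_handler takes the adapter as its data); this is an illustration, not the verbatim kernel code:

	static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
					 unsigned long timeout)
	{
		/* the timer itself was set up with init_timer() when the
		 * request was created; here it is only armed */
		fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
		fsf_req->timer.data = (unsigned long) fsf_req->adapter;
		fsf_req->timer.expires = jiffies + timeout;
		add_timer(&fsf_req->timer);
	}

Because each request owns its timer, zfcp_fsf_req_send() loses its timer argument and the QDIO error path can simply call del_timer(&fsf_req->timer), as the final hunk above shows.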
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 1bb55086db9f..7cafa34e4c7f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -39,11 +39,10 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
39 39
40static struct device_attribute *zfcp_sysfs_sdev_attrs[]; 40static struct device_attribute *zfcp_sysfs_sdev_attrs[];
41 41
42struct scsi_transport_template *zfcp_transport_template;
43
44struct zfcp_data zfcp_data = { 42struct zfcp_data zfcp_data = {
45 .scsi_host_template = { 43 .scsi_host_template = {
46 .name = ZFCP_NAME, 44 .name = ZFCP_NAME,
45 .module = THIS_MODULE,
47 .proc_name = "zfcp", 46 .proc_name = "zfcp",
48 .slave_alloc = zfcp_scsi_slave_alloc, 47 .slave_alloc = zfcp_scsi_slave_alloc,
49 .slave_configure = zfcp_scsi_slave_configure, 48 .slave_configure = zfcp_scsi_slave_configure,
@@ -232,7 +231,7 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
232 */ 231 */
233int 232int
234zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit, 233zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
235 struct scsi_cmnd *scpnt, struct timer_list *timer) 234 struct scsi_cmnd *scpnt, int use_timer)
236{ 235{
237 int tmp; 236 int tmp;
238 int retval; 237 int retval;
@@ -268,7 +267,7 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
268 goto out; 267 goto out;
269 } 268 }
270 269
271 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, timer, 270 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
272 ZFCP_REQ_AUTO_CLEANUP); 271 ZFCP_REQ_AUTO_CLEANUP);
273 272
274 if (unlikely(tmp < 0)) { 273 if (unlikely(tmp < 0)) {
@@ -292,21 +291,22 @@ zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
292 * zfcp_scsi_command_sync - send a SCSI command and wait for completion 291 * zfcp_scsi_command_sync - send a SCSI command and wait for completion
293 * @unit: unit where command is sent to 292 * @unit: unit where command is sent to
294 * @scpnt: scsi command to be sent 293 * @scpnt: scsi command to be sent
295 * @timer: timer to be started if request is successfully initiated 294 * @use_timer: indicates whether timer should be set up or not
296 * Return: 0 295 * Return: 0
297 * 296 *
298 * Errors are indicated in scpnt->result 297 * Errors are indicated in scpnt->result
299 */ 298 */
300int 299int
301zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt, 300zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
302 struct timer_list *timer) 301 int use_timer)
303{ 302{
304 int ret; 303 int ret;
305 DECLARE_COMPLETION(wait); 304 DECLARE_COMPLETION(wait);
306 305
307 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */ 306 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */
308 scpnt->scsi_done = zfcp_scsi_command_sync_handler; 307 scpnt->scsi_done = zfcp_scsi_command_sync_handler;
309 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt, timer); 308 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt,
309 use_timer);
310 if (ret == 0) 310 if (ret == 0)
311 wait_for_completion(&wait); 311 wait_for_completion(&wait);
312 312
@@ -342,7 +342,7 @@ zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
342 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 342 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
343 unit = (struct zfcp_unit *) scpnt->device->hostdata; 343 unit = (struct zfcp_unit *) scpnt->device->hostdata;
344 344
345 return zfcp_scsi_command_async(adapter, unit, scpnt, NULL); 345 return zfcp_scsi_command_async(adapter, unit, scpnt, 0);
346} 346}
347 347
348static struct zfcp_unit * 348static struct zfcp_unit *
@@ -379,16 +379,15 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
379 * will handle late commands. (Usually, the normal completion of late 379 * will handle late commands. (Usually, the normal completion of late
380 * commands is ignored with respect to the running abort operation.) 380 * commands is ignored with respect to the running abort operation.)
381 */ 381 */
382int 382int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
383zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 383{
385 struct Scsi_Host *scsi_host; 384 struct Scsi_Host *scsi_host;
386 struct zfcp_adapter *adapter; 385 struct zfcp_adapter *adapter;
387 struct zfcp_unit *unit; 386 struct zfcp_unit *unit;
388 int retval = SUCCESS; 387 struct zfcp_fsf_req *fsf_req;
389 struct zfcp_fsf_req *new_fsf_req = NULL;
390 struct zfcp_fsf_req *old_fsf_req;
391 unsigned long flags; 388 unsigned long flags;
389 unsigned long old_req_id;
390 int retval = SUCCESS;
392 391
393 scsi_host = scpnt->device->host; 392 scsi_host = scpnt->device->host;
394 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; 393 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
@@ -400,55 +399,47 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
400 /* avoid race condition between late normal completion and abort */ 399 /* avoid race condition between late normal completion and abort */
401 write_lock_irqsave(&adapter->abort_lock, flags); 400 write_lock_irqsave(&adapter->abort_lock, flags);
402 401
403 /* 402 /* Check whether corresponding fsf_req is still pending */
404 * Check whether command has just completed and can not be aborted. 403 spin_lock(&adapter->req_list_lock);
405 * Even if the command has just been completed late, we can access 404 fsf_req = zfcp_reqlist_ismember(adapter, (unsigned long)
406 * scpnt since the SCSI stack does not release it at least until 405 scpnt->host_scribble);
407 * this routine returns. (scpnt is parameter passed to this routine 406 spin_unlock(&adapter->req_list_lock);
408 * and must not disappear during abort even on late completion.) 407 if (!fsf_req) {
409 */
410 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
411 if (!old_fsf_req) {
412 write_unlock_irqrestore(&adapter->abort_lock, flags); 408 write_unlock_irqrestore(&adapter->abort_lock, flags);
413 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL); 409 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0);
414 retval = SUCCESS; 410 retval = SUCCESS;
415 goto out; 411 goto out;
416 } 412 }
417 old_fsf_req->data = 0; 413 fsf_req->data = 0;
418 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; 414 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
415 old_req_id = fsf_req->req_id;
419 416
420 /* don't access old_fsf_req after releasing the abort_lock */ 417 /* don't access old fsf_req after releasing the abort_lock */
421 write_unlock_irqrestore(&adapter->abort_lock, flags); 418 write_unlock_irqrestore(&adapter->abort_lock, flags);
422 /* call FSF routine which does the abort */ 419
423 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req, 420 fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0);
424 adapter, unit, 0); 421 if (!fsf_req) {
425 if (!new_fsf_req) {
426 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); 422 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
427 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 423 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
428 old_fsf_req); 424 old_req_id);
429 retval = FAILED; 425 retval = FAILED;
430 goto out; 426 goto out;
431 } 427 }
432 428
433 /* wait for completion of abort */ 429 __wait_event(fsf_req->completion_wq,
434 __wait_event(new_fsf_req->completion_wq, 430 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
435 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
436 431
437 /* status should be valid since signals were not permitted */ 432 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
438 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 433 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0);
439 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req,
440 NULL);
441 retval = SUCCESS; 434 retval = SUCCESS;
442 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 435 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
443 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req, 436 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0);
444 NULL);
445 retval = SUCCESS; 437 retval = SUCCESS;
446 } else { 438 } else {
447 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req, 439 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0);
448 NULL);
449 retval = FAILED; 440 retval = FAILED;
450 } 441 }
451 zfcp_fsf_req_free(new_fsf_req); 442 zfcp_fsf_req_free(fsf_req);
452 out: 443 out:
453 return retval; 444 return retval;
454} 445}
@@ -548,14 +539,11 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
548 539
549/** 540/**
550 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset 541 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
551 *
552 * If ERP is already running it will be stopped.
553 */ 542 */
554int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 543int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
555{ 544{
556 struct zfcp_unit *unit; 545 struct zfcp_unit *unit;
557 struct zfcp_adapter *adapter; 546 struct zfcp_adapter *adapter;
558 unsigned long flags;
559 547
560 unit = (struct zfcp_unit*) scpnt->device->hostdata; 548 unit = (struct zfcp_unit*) scpnt->device->hostdata;
561 adapter = unit->port->adapter; 549 adapter = unit->port->adapter;
@@ -563,22 +551,8 @@ int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
563 ZFCP_LOG_NORMAL("host/bus reset because of problems with " 551 ZFCP_LOG_NORMAL("host/bus reset because of problems with "
564 "unit 0x%016Lx\n", unit->fcp_lun); 552 "unit 0x%016Lx\n", unit->fcp_lun);
565 553
566 write_lock_irqsave(&adapter->erp_lock, flags); 554 zfcp_erp_adapter_reopen(adapter, 0);
567 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 555 zfcp_erp_wait(adapter);
568 &adapter->status)) {
569 zfcp_erp_modify_adapter_status(adapter,
570 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
571 ZFCP_CLEAR);
572 zfcp_erp_action_dismiss_adapter(adapter);
573 write_unlock_irqrestore(&adapter->erp_lock, flags);
574 zfcp_fsf_req_dismiss_all(adapter);
575 adapter->fsf_req_seq_no = 0;
576 zfcp_erp_adapter_reopen(adapter, 0);
577 } else {
578 write_unlock_irqrestore(&adapter->erp_lock, flags);
579 zfcp_erp_adapter_reopen(adapter, 0);
580 zfcp_erp_wait(adapter);
581 }
582 556
583 return SUCCESS; 557 return SUCCESS;
584} 558}
@@ -607,7 +581,7 @@ zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
607 adapter->scsi_host->max_channel = 0; 581 adapter->scsi_host->max_channel = 0;
608 adapter->scsi_host->unique_id = unique_id++; /* FIXME */ 582 adapter->scsi_host->unique_id = unique_id++; /* FIXME */
609 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH; 583 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH;
610 adapter->scsi_host->transportt = zfcp_transport_template; 584 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
611 585
612 /* 586 /*
613 * save a pointer to our own adapter data structure within 587 * save a pointer to our own adapter data structure within
@@ -648,16 +622,6 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
648 return; 622 return;
649} 623}
650 624
651
652void
653zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *adapter)
654{
655 adapter->scsi_er_timer.function = zfcp_fsf_scsi_er_timeout_handler;
656 adapter->scsi_er_timer.data = (unsigned long) adapter;
657 adapter->scsi_er_timer.expires = jiffies + ZFCP_SCSI_ER_TIMEOUT;
658 add_timer(&adapter->scsi_er_timer);
659}
660
661/* 625/*
662 * Support functions for FC transport class 626 * Support functions for FC transport class
663 */ 627 */
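
With per-request ids stored in scsi_cmnd->host_scribble, the abort handler above no longer dereferences a possibly stale request pointer; it resolves the id against the adapter's list of pending requests (zfcp_reqlist_ismember) while holding req_list_lock. A sketch of such a lookup, assuming a simple hash of list heads keyed by request id (REQUEST_LIST_SIZE and the req_list layout are assumptions made for illustration, not the exact kernel helper):

	static struct zfcp_fsf_req *
	zfcp_reqlist_ismember(struct zfcp_adapter *adapter, unsigned long req_id)
	{
		struct zfcp_fsf_req *req;
		unsigned int idx = req_id % REQUEST_LIST_SIZE;	/* assumed hash */

		list_for_each_entry(req, &adapter->req_list[idx], list)
			if (req->req_id == req_id)
				return req;
		return NULL;
	}

A NULL result means the command has already completed, which is why the "lte1" branch above returns SUCCESS without issuing an abort.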
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index d1c1e75bfd60..1e788e815ce7 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -11,19 +11,18 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/ebcdic.h> 12#include <asm/ebcdic.h>
13 13
14struct sysinfo_1_1_1 14struct sysinfo_1_1_1 {
15{
16 char reserved_0[32]; 15 char reserved_0[32];
17 char manufacturer[16]; 16 char manufacturer[16];
18 char type[4]; 17 char type[4];
19 char reserved_1[12]; 18 char reserved_1[12];
20 char model[16]; 19 char model_capacity[16];
21 char sequence[16]; 20 char sequence[16];
22 char plant[4]; 21 char plant[4];
22 char model[16];
23}; 23};
24 24
25struct sysinfo_1_2_1 25struct sysinfo_1_2_1 {
26{
27 char reserved_0[80]; 26 char reserved_0[80];
28 char sequence[16]; 27 char sequence[16];
29 char plant[4]; 28 char plant[4];
@@ -31,9 +30,12 @@ struct sysinfo_1_2_1
31 unsigned short cpu_address; 30 unsigned short cpu_address;
32}; 31};
33 32
34struct sysinfo_1_2_2 33struct sysinfo_1_2_2 {
35{ 34 char format;
36 char reserved_0[32]; 35 char reserved_0[1];
36 unsigned short acc_offset;
37 char reserved_1[24];
38 unsigned int secondary_capability;
37 unsigned int capability; 39 unsigned int capability;
38 unsigned short cpus_total; 40 unsigned short cpus_total;
39 unsigned short cpus_configured; 41 unsigned short cpus_configured;
@@ -42,8 +44,12 @@ struct sysinfo_1_2_2
42 unsigned short adjustment[0]; 44 unsigned short adjustment[0];
43}; 45};
44 46
45struct sysinfo_2_2_1 47struct sysinfo_1_2_2_extension {
46{ 48 unsigned int alt_capability;
49 unsigned short alt_adjustment[0];
50};
51
52struct sysinfo_2_2_1 {
47 char reserved_0[80]; 53 char reserved_0[80];
48 char sequence[16]; 54 char sequence[16];
49 char plant[4]; 55 char plant[4];
@@ -51,15 +57,11 @@ struct sysinfo_2_2_1
51 unsigned short cpu_address; 57 unsigned short cpu_address;
52}; 58};
53 59
54struct sysinfo_2_2_2 60struct sysinfo_2_2_2 {
55{
56 char reserved_0[32]; 61 char reserved_0[32];
57 unsigned short lpar_number; 62 unsigned short lpar_number;
58 char reserved_1; 63 char reserved_1;
59 unsigned char characteristics; 64 unsigned char characteristics;
60 #define LPAR_CHAR_DEDICATED (1 << 7)
61 #define LPAR_CHAR_SHARED (1 << 6)
62 #define LPAR_CHAR_LIMITED (1 << 5)
63 unsigned short cpus_total; 65 unsigned short cpus_total;
64 unsigned short cpus_configured; 66 unsigned short cpus_configured;
65 unsigned short cpus_standby; 67 unsigned short cpus_standby;
@@ -71,12 +73,14 @@ struct sysinfo_2_2_2
71 unsigned short cpus_shared; 73 unsigned short cpus_shared;
72}; 74};
73 75
74struct sysinfo_3_2_2 76#define LPAR_CHAR_DEDICATED (1 << 7)
75{ 77#define LPAR_CHAR_SHARED (1 << 6)
78#define LPAR_CHAR_LIMITED (1 << 5)
79
80struct sysinfo_3_2_2 {
76 char reserved_0[31]; 81 char reserved_0[31];
77 unsigned char count; 82 unsigned char count;
78 struct 83 struct {
79 {
80 char reserved_0[4]; 84 char reserved_0[4];
81 unsigned short cpus_total; 85 unsigned short cpus_total;
82 unsigned short cpus_configured; 86 unsigned short cpus_configured;
@@ -90,136 +94,223 @@ struct sysinfo_3_2_2
90 } vm[8]; 94 } vm[8];
91}; 95};
92 96
93union s390_sysinfo 97static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
94{ 98{
95 struct sysinfo_1_1_1 sysinfo_1_1_1; 99 register int r0 asm("0") = (fc << 28) | sel1;
96 struct sysinfo_1_2_1 sysinfo_1_2_1; 100 register int r1 asm("1") = sel2;
97 struct sysinfo_1_2_2 sysinfo_1_2_2; 101
98 struct sysinfo_2_2_1 sysinfo_2_2_1; 102 asm volatile(
99 struct sysinfo_2_2_2 sysinfo_2_2_2; 103 " stsi 0(%2)\n"
100 struct sysinfo_3_2_2 sysinfo_3_2_2; 104 "0: jz 2f\n"
101}; 105 "1: lhi %0,%3\n"
102 106 "2:\n"
103static inline int stsi (void *sysinfo, 107 EX_TABLE(0b,1b)
104 int fc, int sel1, int sel2) 108 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
105{ 109 : "cc", "memory" );
106 int cc, retv; 110 return r0;
107
108#ifndef CONFIG_64BIT
109 __asm__ __volatile__ ( "lr\t0,%2\n"
110 "\tlr\t1,%3\n"
111 "\tstsi\t0(%4)\n"
112 "0:\tipm\t%0\n"
113 "\tsrl\t%0,28\n"
114 "1:lr\t%1,0\n"
115 ".section .fixup,\"ax\"\n"
116 "2:\tlhi\t%0,3\n"
117 "\tbras\t1,3f\n"
118 "\t.long 1b\n"
119 "3:\tl\t1,0(1)\n"
120 "\tbr\t1\n"
121 ".previous\n"
122 ".section __ex_table,\"a\"\n"
123 "\t.align 4\n"
124 "\t.long 0b,2b\n"
125 ".previous\n"
126 : "=d" (cc), "=d" (retv)
127 : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
128 : "cc", "memory", "0", "1" );
129#else
130 __asm__ __volatile__ ( "lr\t0,%2\n"
131 "lr\t1,%3\n"
132 "\tstsi\t0(%4)\n"
133 "0:\tipm\t%0\n"
134 "\tsrl\t%0,28\n"
135 "1:lr\t%1,0\n"
136 ".section .fixup,\"ax\"\n"
137 "2:\tlhi\t%0,3\n"
138 "\tjg\t1b\n"
139 ".previous\n"
140 ".section __ex_table,\"a\"\n"
141 "\t.align 8\n"
142 "\t.quad 0b,2b\n"
143 ".previous\n"
144 : "=d" (cc), "=d" (retv)
145 : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
146 : "cc", "memory", "0", "1" );
147#endif
148
149 return cc? -1 : retv;
150} 111}
151 112
152static inline int stsi_0 (void) 113static inline int stsi_0(void)
153{ 114{
154 int rc = stsi (NULL, 0, 0, 0); 115 int rc = stsi (NULL, 0, 0, 0);
155 return rc == -1 ? rc : (((unsigned int)rc) >> 28); 116 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
156} 117}
157 118
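
The rewritten stsi() packs the function code and first selector into register 0 as (fc << 28) | sel1, puts the second selector into register 1, and relies on the exception table to turn a faulting STSI into -ENOSYS, so callers only need to compare against -ENOSYS. A small usage sketch built only from what the surrounding hunks define:

	/* Ask for basic machine information (STSI 1.1.1); -ENOSYS means
	 * this level of information is not available on this machine. */
	struct sysinfo_1_1_1 *info;

	info = (struct sysinfo_1_1_1 *) get_zeroed_page(GFP_KERNEL);
	if (info) {
		if (stsi(info, 1, 1, 1) != -ENOSYS)
			/* convert the EBCDIC fields, as stsi_1_1_1() does */
			EBCASC(info->manufacturer, sizeof(info->manufacturer));
		free_page((unsigned long) info);
	}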
158static inline int stsi_1_1_1 (struct sysinfo_1_1_1 *info) 119static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
159{ 120{
160 int rc = stsi (info, 1, 1, 1); 121 if (stsi(info, 1, 1, 1) == -ENOSYS)
161 if (rc != -1) 122 return len;
162 { 123
163 EBCASC (info->manufacturer, sizeof(info->manufacturer)); 124 EBCASC(info->manufacturer, sizeof(info->manufacturer));
164 EBCASC (info->type, sizeof(info->type)); 125 EBCASC(info->type, sizeof(info->type));
165 EBCASC (info->model, sizeof(info->model)); 126 EBCASC(info->model, sizeof(info->model));
166 EBCASC (info->sequence, sizeof(info->sequence)); 127 EBCASC(info->sequence, sizeof(info->sequence));
167 EBCASC (info->plant, sizeof(info->plant)); 128 EBCASC(info->plant, sizeof(info->plant));
168 } 129 EBCASC(info->model_capacity, sizeof(info->model_capacity));
169 return rc == -1 ? rc : 0; 130 len += sprintf(page + len, "Manufacturer: %-16.16s\n",
131 info->manufacturer);
132 len += sprintf(page + len, "Type: %-4.4s\n",
133 info->type);
134 if (info->model[0] != '\0')
135 /*
136 * Sigh: the model field has been renamed with System z9
137 * to model_capacity and a new model field has been added
138 * after the plant field. To avoid confusing older programs
139 * the "Model:" prints "model_capacity model" or just
140 * "model_capacity" if the model string is empty .
141 */
142 len += sprintf(page + len,
143 "Model: %-16.16s %-16.16s\n",
144 info->model_capacity, info->model);
145 else
146 len += sprintf(page + len, "Model: %-16.16s\n",
147 info->model_capacity);
148 len += sprintf(page + len, "Sequence Code: %-16.16s\n",
149 info->sequence);
150 len += sprintf(page + len, "Plant: %-4.4s\n",
151 info->plant);
152 len += sprintf(page + len, "Model Capacity: %-16.16s\n",
153 info->model_capacity);
154 return len;
170} 155}
171 156
172static inline int stsi_1_2_1 (struct sysinfo_1_2_1 *info) 157#if 0 /* Currently unused */
158static int stsi_1_2_1(struct sysinfo_1_2_1 *info, char *page, int len)
173{ 159{
174 int rc = stsi (info, 1, 2, 1); 160 if (stsi(info, 1, 2, 1) == -ENOSYS)
175 if (rc != -1) 161 return len;
176 { 162
177 EBCASC (info->sequence, sizeof(info->sequence)); 163 len += sprintf(page + len, "\n");
178 EBCASC (info->plant, sizeof(info->plant)); 164 EBCASC(info->sequence, sizeof(info->sequence));
179 } 165 EBCASC(info->plant, sizeof(info->plant));
180 return rc == -1 ? rc : 0; 166 len += sprintf(page + len, "Sequence Code of CPU: %-16.16s\n",
167 info->sequence);
168 len += sprintf(page + len, "Plant of CPU: %-16.16s\n",
169 info->plant);
170 return len;
181} 171}
172#endif
182 173
183static inline int stsi_1_2_2 (struct sysinfo_1_2_2 *info) 174static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
184{ 175{
185 int rc = stsi (info, 1, 2, 2); 176 struct sysinfo_1_2_2_extension *ext;
186 return rc == -1 ? rc : 0; 177 int i;
178
179 if (stsi(info, 1, 2, 2) == -ENOSYS)
180 return len;
181 ext = (struct sysinfo_1_2_2_extension *)
182 ((unsigned long) info + info->acc_offset);
183
184 len += sprintf(page + len, "\n");
185 len += sprintf(page + len, "CPUs Total: %d\n",
186 info->cpus_total);
187 len += sprintf(page + len, "CPUs Configured: %d\n",
188 info->cpus_configured);
189 len += sprintf(page + len, "CPUs Standby: %d\n",
190 info->cpus_standby);
191 len += sprintf(page + len, "CPUs Reserved: %d\n",
192 info->cpus_reserved);
193
194 if (info->format == 1) {
195 /*
196 * Sigh 2. According to the specification the alternate
197 * capability field is a 32 bit floating point number
198 * if the higher order 8 bits are not zero. Printing
199 * a floating point number in the kernel is a no-no,
200 * always print the number as 32 bit unsigned integer.
 201		 * The user-space needs to know about the strange
202 * encoding of the alternate cpu capability.
203 */
204 len += sprintf(page + len, "Capability: %u %u\n",
205 info->capability, ext->alt_capability);
206 for (i = 2; i <= info->cpus_total; i++)
207 len += sprintf(page + len,
208 "Adjustment %02d-way: %u %u\n",
209 i, info->adjustment[i-2],
210 ext->alt_adjustment[i-2]);
211
212 } else {
213 len += sprintf(page + len, "Capability: %u\n",
214 info->capability);
215 for (i = 2; i <= info->cpus_total; i++)
216 len += sprintf(page + len,
217 "Adjustment %02d-way: %u\n",
218 i, info->adjustment[i-2]);
219 }
220
221 if (info->secondary_capability != 0)
222 len += sprintf(page + len, "Secondary Capability: %d\n",
223 info->secondary_capability);
224
225 return len;
187} 226}
188 227
189static inline int stsi_2_2_1 (struct sysinfo_2_2_1 *info) 228#if 0 /* Currently unused */
229static int stsi_2_2_1(struct sysinfo_2_2_1 *info, char *page, int len)
190{ 230{
191 int rc = stsi (info, 2, 2, 1); 231 if (stsi(info, 2, 2, 1) == -ENOSYS)
192 if (rc != -1) 232 return len;
193 { 233
194 EBCASC (info->sequence, sizeof(info->sequence)); 234 len += sprintf(page + len, "\n");
195 EBCASC (info->plant, sizeof(info->plant)); 235 EBCASC (info->sequence, sizeof(info->sequence));
196 } 236 EBCASC (info->plant, sizeof(info->plant));
197 return rc == -1 ? rc : 0; 237 len += sprintf(page + len, "Sequence Code of logical CPU: %-16.16s\n",
238 info->sequence);
239 len += sprintf(page + len, "Plant of logical CPU: %-16.16s\n",
240 info->plant);
241 return len;
198} 242}
243#endif
199 244
200static inline int stsi_2_2_2 (struct sysinfo_2_2_2 *info) 245static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
201{ 246{
202 int rc = stsi (info, 2, 2, 2); 247 if (stsi(info, 2, 2, 2) == -ENOSYS)
203 if (rc != -1) 248 return len;
204 { 249
205 EBCASC (info->name, sizeof(info->name)); 250 EBCASC (info->name, sizeof(info->name));
206 } 251
207 return rc == -1 ? rc : 0; 252 len += sprintf(page + len, "\n");
253 len += sprintf(page + len, "LPAR Number: %d\n",
254 info->lpar_number);
255
256 len += sprintf(page + len, "LPAR Characteristics: ");
257 if (info->characteristics & LPAR_CHAR_DEDICATED)
258 len += sprintf(page + len, "Dedicated ");
259 if (info->characteristics & LPAR_CHAR_SHARED)
260 len += sprintf(page + len, "Shared ");
261 if (info->characteristics & LPAR_CHAR_LIMITED)
262 len += sprintf(page + len, "Limited ");
263 len += sprintf(page + len, "\n");
264
265 len += sprintf(page + len, "LPAR Name: %-8.8s\n",
266 info->name);
267
268 len += sprintf(page + len, "LPAR Adjustment: %d\n",
269 info->caf);
270
271 len += sprintf(page + len, "LPAR CPUs Total: %d\n",
272 info->cpus_total);
273 len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
274 info->cpus_configured);
275 len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
276 info->cpus_standby);
277 len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
278 info->cpus_reserved);
279 len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
280 info->cpus_dedicated);
281 len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
282 info->cpus_shared);
283 return len;
208} 284}
209 285
210static inline int stsi_3_2_2 (struct sysinfo_3_2_2 *info) 286static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
211{ 287{
212 int rc = stsi (info, 3, 2, 2); 288 int i;
213 if (rc != -1) 289
214 { 290 if (stsi(info, 3, 2, 2) == -ENOSYS)
215 int i; 291 return len;
216 for (i = 0; i < info->count; i++) 292 for (i = 0; i < info->count; i++) {
217 { 293 EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
218 EBCASC (info->vm[i].name, sizeof(info->vm[i].name)); 294 EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
219 EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi)); 295 len += sprintf(page + len, "\n");
220 } 296 len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
297 i, info->vm[i].name);
298 len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
299 i, info->vm[i].cpi);
300
301 len += sprintf(page + len, "VM%02d Adjustment: %d\n",
302 i, info->vm[i].caf);
303
304 len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
305 i, info->vm[i].cpus_total);
306 len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
307 i, info->vm[i].cpus_configured);
308 len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
309 i, info->vm[i].cpus_standby);
310 len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
311 i, info->vm[i].cpus_reserved);
221 } 312 }
222 return rc == -1 ? rc : 0; 313 return len;
223} 314}
224 315
225 316
@@ -227,118 +318,34 @@ static int proc_read_sysinfo(char *page, char **start,
227 off_t off, int count, 318 off_t off, int count,
228 int *eof, void *data) 319 int *eof, void *data)
229{ 320{
230 unsigned long info_page = get_zeroed_page (GFP_KERNEL); 321 unsigned long info = get_zeroed_page (GFP_KERNEL);
231 union s390_sysinfo *info = (union s390_sysinfo *) info_page; 322 int level, len;
232 int len = 0;
233 int level;
234 int i;
235 323
236 if (!info) 324 if (!info)
237 return 0; 325 return 0;
238 326
239 level = stsi_0 (); 327 len = 0;
240 328 level = stsi_0();
241 if (level >= 1 && stsi_1_1_1 (&info->sysinfo_1_1_1) == 0) 329 if (level >= 1)
242 { 330 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
243 len += sprintf (page+len, "Manufacturer: %-16.16s\n",
244 info->sysinfo_1_1_1.manufacturer);
245 len += sprintf (page+len, "Type: %-4.4s\n",
246 info->sysinfo_1_1_1.type);
247 len += sprintf (page+len, "Model: %-16.16s\n",
248 info->sysinfo_1_1_1.model);
249 len += sprintf (page+len, "Sequence Code: %-16.16s\n",
250 info->sysinfo_1_1_1.sequence);
251 len += sprintf (page+len, "Plant: %-4.4s\n",
252 info->sysinfo_1_1_1.plant);
253 }
254
255 if (level >= 1 && stsi_1_2_2 (&info->sysinfo_1_2_2) == 0)
256 {
257 len += sprintf (page+len, "\n");
258 len += sprintf (page+len, "CPUs Total: %d\n",
259 info->sysinfo_1_2_2.cpus_total);
260 len += sprintf (page+len, "CPUs Configured: %d\n",
261 info->sysinfo_1_2_2.cpus_configured);
262 len += sprintf (page+len, "CPUs Standby: %d\n",
263 info->sysinfo_1_2_2.cpus_standby);
264 len += sprintf (page+len, "CPUs Reserved: %d\n",
265 info->sysinfo_1_2_2.cpus_reserved);
266
267 len += sprintf (page+len, "Capability: %d\n",
268 info->sysinfo_1_2_2.capability);
269 331
270 for (i = 2; i <= info->sysinfo_1_2_2.cpus_total; i++) 332 if (level >= 1)
271 len += sprintf (page+len, "Adjustment %02d-way: %d\n", 333 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
272 i, info->sysinfo_1_2_2.adjustment[i-2]);
273 }
274 334
275 if (level >= 2 && stsi_2_2_2 (&info->sysinfo_2_2_2) == 0) 335 if (level >= 2)
276 { 336 len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
277 len += sprintf (page+len, "\n");
278 len += sprintf (page+len, "LPAR Number: %d\n",
279 info->sysinfo_2_2_2.lpar_number);
280
281 len += sprintf (page+len, "LPAR Characteristics: ");
282 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_DEDICATED)
283 len += sprintf (page+len, "Dedicated ");
284 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_SHARED)
285 len += sprintf (page+len, "Shared ");
286 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_LIMITED)
287 len += sprintf (page+len, "Limited ");
288 len += sprintf (page+len, "\n");
289
290 len += sprintf (page+len, "LPAR Name: %-8.8s\n",
291 info->sysinfo_2_2_2.name);
292
293 len += sprintf (page+len, "LPAR Adjustment: %d\n",
294 info->sysinfo_2_2_2.caf);
295
296 len += sprintf (page+len, "LPAR CPUs Total: %d\n",
297 info->sysinfo_2_2_2.cpus_total);
298 len += sprintf (page+len, "LPAR CPUs Configured: %d\n",
299 info->sysinfo_2_2_2.cpus_configured);
300 len += sprintf (page+len, "LPAR CPUs Standby: %d\n",
301 info->sysinfo_2_2_2.cpus_standby);
302 len += sprintf (page+len, "LPAR CPUs Reserved: %d\n",
303 info->sysinfo_2_2_2.cpus_reserved);
304 len += sprintf (page+len, "LPAR CPUs Dedicated: %d\n",
305 info->sysinfo_2_2_2.cpus_dedicated);
306 len += sprintf (page+len, "LPAR CPUs Shared: %d\n",
307 info->sysinfo_2_2_2.cpus_shared);
308 }
309 337
310 if (level >= 3 && stsi_3_2_2 (&info->sysinfo_3_2_2) == 0) 338 if (level >= 3)
311 { 339 len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
312 for (i = 0; i < info->sysinfo_3_2_2.count; i++)
313 {
314 len += sprintf (page+len, "\n");
315 len += sprintf (page+len, "VM%02d Name: %-8.8s\n",
316 i, info->sysinfo_3_2_2.vm[i].name);
317 len += sprintf (page+len, "VM%02d Control Program: %-16.16s\n",
318 i, info->sysinfo_3_2_2.vm[i].cpi);
319
320 len += sprintf (page+len, "VM%02d Adjustment: %d\n",
321 i, info->sysinfo_3_2_2.vm[i].caf);
322
323 len += sprintf (page+len, "VM%02d CPUs Total: %d\n",
324 i, info->sysinfo_3_2_2.vm[i].cpus_total);
325 len += sprintf (page+len, "VM%02d CPUs Configured: %d\n",
326 i, info->sysinfo_3_2_2.vm[i].cpus_configured);
327 len += sprintf (page+len, "VM%02d CPUs Standby: %d\n",
328 i, info->sysinfo_3_2_2.vm[i].cpus_standby);
329 len += sprintf (page+len, "VM%02d CPUs Reserved: %d\n",
330 i, info->sysinfo_3_2_2.vm[i].cpus_reserved);
331 }
332 }
333 340
334 free_page (info_page); 341 free_page (info);
335 return len; 342 return len;
336} 343}
337 344
338static __init int create_proc_sysinfo(void) 345static __init int create_proc_sysinfo(void)
339{ 346{
340 create_proc_read_entry ("sysinfo", 0444, NULL, 347 create_proc_read_entry("sysinfo", 0444, NULL,
341 proc_read_sysinfo, NULL); 348 proc_read_sysinfo, NULL);
342 return 0; 349 return 0;
343} 350}
344 351