Diffstat (limited to 'drivers/s390')
-rw-r--r-- drivers/s390/block/Kconfig | 24
-rw-r--r-- drivers/s390/block/dasd.c | 376
-rw-r--r-- drivers/s390/block/dasd_3990_erp.c | 27
-rw-r--r-- drivers/s390/block/dasd_alias.c | 10
-rw-r--r-- drivers/s390/block/dasd_devmap.c | 187
-rw-r--r-- drivers/s390/block/dasd_diag.c | 28
-rw-r--r-- drivers/s390/block/dasd_diag.h | 4
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 810
-rw-r--r-- drivers/s390/block/dasd_eckd.h | 18
-rw-r--r-- drivers/s390/block/dasd_eer.c | 3
-rw-r--r-- drivers/s390/block/dasd_erp.c | 3
-rw-r--r-- drivers/s390/block/dasd_fba.c | 27
-rw-r--r-- drivers/s390/block/dasd_genhd.c | 4
-rw-r--r-- drivers/s390/block/dasd_int.h | 38
-rw-r--r-- drivers/s390/block/dasd_ioctl.c | 143
-rw-r--r-- drivers/s390/block/dasd_proc.c | 1
-rw-r--r-- drivers/s390/block/dcssblk.c | 5
-rw-r--r-- drivers/s390/block/xpram.c | 4
-rw-r--r-- drivers/s390/char/Kconfig | 77
-rw-r--r-- drivers/s390/char/Makefile | 3
-rw-r--r-- drivers/s390/char/con3215.c | 8
-rw-r--r-- drivers/s390/char/fs3270.c | 2
-rw-r--r-- drivers/s390/char/keyboard.c | 7
-rw-r--r-- drivers/s390/char/keyboard.h | 2
-rw-r--r-- drivers/s390/char/monreader.c | 1
-rw-r--r-- drivers/s390/char/monwriter.c | 5
-rw-r--r-- drivers/s390/char/raw3270.c | 12
-rw-r--r-- drivers/s390/char/sclp.c | 39
-rw-r--r-- drivers/s390/char/sclp.h | 24
-rw-r--r-- drivers/s390/char/sclp_cmd.c | 2
-rw-r--r-- drivers/s390/char/sclp_config.c | 15
-rw-r--r-- drivers/s390/char/sclp_ocf.c | 145
-rw-r--r-- drivers/s390/char/sclp_sdias.c | 3
-rw-r--r-- drivers/s390/char/sclp_tty.c | 122
-rw-r--r-- drivers/s390/char/tape.h | 8
-rw-r--r-- drivers/s390/char/tape_34xx.c | 65
-rw-r--r-- drivers/s390/char/tape_3590.c | 124
-rw-r--r-- drivers/s390/char/tape_block.c | 444
-rw-r--r-- drivers/s390/char/tape_char.c | 4
-rw-r--r-- drivers/s390/char/tape_class.h | 1
-rw-r--r-- drivers/s390/char/tape_core.c | 79
-rw-r--r-- drivers/s390/char/tape_std.c | 7
-rw-r--r-- drivers/s390/char/tty3270.c | 16
-rw-r--r-- drivers/s390/char/vmcp.c | 7
-rw-r--r-- drivers/s390/char/vmlogrdr.c | 42
-rw-r--r-- drivers/s390/char/vmur.c | 9
-rw-r--r-- drivers/s390/char/vmwatchdog.c | 1
-rw-r--r-- drivers/s390/char/zcore.c | 2
-rw-r--r-- drivers/s390/cio/blacklist.c | 10
-rw-r--r-- drivers/s390/cio/ccwgroup.c | 84
-rw-r--r-- drivers/s390/cio/chp.c | 41
-rw-r--r-- drivers/s390/cio/chp.h | 12
-rw-r--r-- drivers/s390/cio/chsc.c | 345
-rw-r--r-- drivers/s390/cio/chsc.h | 46
-rw-r--r-- drivers/s390/cio/chsc_sch.c | 30
-rw-r--r-- drivers/s390/cio/cio.c | 45
-rw-r--r-- drivers/s390/cio/cio.h | 11
-rw-r--r-- drivers/s390/cio/css.c | 67
-rw-r--r-- drivers/s390/cio/css.h | 10
-rw-r--r-- drivers/s390/cio/device.c | 88
-rw-r--r-- drivers/s390/cio/device.h | 1
-rw-r--r-- drivers/s390/cio/device_fsm.c | 53
-rw-r--r-- drivers/s390/cio/device_ops.c | 43
-rw-r--r-- drivers/s390/cio/device_pgid.c | 23
-rw-r--r-- drivers/s390/cio/io_sch.h | 121
-rw-r--r-- drivers/s390/cio/ioasm.h | 34
-rw-r--r-- drivers/s390/cio/itcw.c | 62
-rw-r--r-- drivers/s390/cio/orb.h | 67
-rw-r--r-- drivers/s390/cio/qdio.h | 56
-rw-r--r-- drivers/s390/cio/qdio_debug.c | 34
-rw-r--r-- drivers/s390/cio/qdio_main.c | 351
-rw-r--r-- drivers/s390/cio/qdio_setup.c | 21
-rw-r--r-- drivers/s390/cio/qdio_thinint.c | 104
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 82
-rw-r--r-- drivers/s390/crypto/ap_bus.h | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 16
-rw-r--r-- drivers/s390/crypto/zcrypt_api.h | 3
-rw-r--r-- drivers/s390/crypto/zcrypt_cex2a.c | 82
-rw-r--r-- drivers/s390/crypto/zcrypt_cex2a.h | 25
-rw-r--r-- drivers/s390/crypto/zcrypt_pcica.c | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_pcicc.c | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_pcixcc.c | 17
-rw-r--r-- drivers/s390/kvm/kvm_virtio.c | 79
-rw-r--r-- drivers/s390/net/Kconfig | 53
-rw-r--r-- drivers/s390/net/claw.c | 38
-rw-r--r-- drivers/s390/net/ctcm_fsms.c | 2
-rw-r--r-- drivers/s390/net/ctcm_main.c | 16
-rw-r--r-- drivers/s390/net/ctcm_mpc.c | 15
-rw-r--r-- drivers/s390/net/lcs.c | 38
-rw-r--r-- drivers/s390/net/netiucv.c | 4
-rw-r--r-- drivers/s390/net/qeth_core.h | 52
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 508
-rw-r--r-- drivers/s390/net/qeth_core_mpc.h | 19
-rw-r--r-- drivers/s390/net/qeth_core_sys.c | 63
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 272
-rw-r--r-- drivers/s390/net/qeth_l3.h | 2
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 796
-rw-r--r-- drivers/s390/net/qeth_l3_sys.c | 103
-rw-r--r-- drivers/s390/net/smsgiucv.c | 2
-rw-r--r-- drivers/s390/scsi/Makefile | 5
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 213
-rw-r--r-- drivers/s390/scsi/zfcp_ccw.c | 37
-rw-r--r-- drivers/s390/scsi/zfcp_cfdc.c | 189
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 1179
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.h | 503
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 93
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 741
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 116
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 355
-rw-r--r-- drivers/s390/scsi/zfcp_fc.h | 126
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 758
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 49
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.h | 9
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 272
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs.c | 224
-rw-r--r-- drivers/s390/scsi/zfcp_unit.c | 244
116 files changed, 6640 insertions(+), 5511 deletions(-)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 07883197f474..8e477bb1f3f6 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -2,7 +2,8 @@ comment "S/390 block device drivers"
         depends on S390 && BLOCK
 
 config BLK_DEV_XPRAM
-        tristate "XPRAM disk support"
+        def_tristate m
+        prompt "XPRAM disk support"
         depends on S390 && BLOCK
         help
           Select this option if you want to use your expanded storage on S/390
@@ -12,13 +13,15 @@ config BLK_DEV_XPRAM
           xpram. If unsure, say "N".
 
 config DCSSBLK
-        tristate "DCSSBLK support"
+        def_tristate m
+        prompt "DCSSBLK support"
         depends on S390 && BLOCK
         help
           Support for dcss block device
 
 config DASD
-        tristate "Support for DASD devices"
+        def_tristate y
+        prompt "Support for DASD devices"
         depends on CCW && BLOCK
         select IOSCHED_DEADLINE
         help
@@ -27,28 +30,32 @@ config DASD
           natively on a single image or an LPAR.
 
 config DASD_PROFILE
-        bool "Profiling support for dasd devices"
+        def_bool y
+        prompt "Profiling support for dasd devices"
         depends on DASD
         help
           Enable this option if you want to see profiling information
           in /proc/dasd/statistics.
 
 config DASD_ECKD
-        tristate "Support for ECKD Disks"
+        def_tristate y
+        prompt "Support for ECKD Disks"
         depends on DASD
         help
           ECKD devices are the most commonly used devices. You should enable
           this option unless you are very sure to have no ECKD device.
 
 config DASD_FBA
-        tristate "Support for FBA Disks"
+        def_tristate y
+        prompt "Support for FBA Disks"
         depends on DASD
         help
           Select this option to be able to access FBA devices. It is safe to
           say "Y".
 
 config DASD_DIAG
-        tristate "Support for DIAG access to Disks"
+        def_tristate y
+        prompt "Support for DIAG access to Disks"
         depends on DASD
         help
           Select this option if you want to use Diagnose250 command to access
@@ -56,7 +63,8 @@ config DASD_DIAG
           say "N".
 
 config DASD_EER
-        bool "Extended error reporting (EER)"
+        def_bool y
+        prompt "Extended error reporting (EER)"
         depends on DASD
         help
           This driver provides a character device interface to the
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8373ca0de8e0..86b6f1cc1b10 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -11,6 +11,7 @@
 #define KMSG_COMPONENT "dasd"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/kmod.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -21,7 +22,6 @@
 #include <linux/hdreg.h>
 #include <linux/async.h>
 #include <linux/mutex.h>
-#include <linux/smp_lock.h>
 
 #include <asm/ccwdev.h>
 #include <asm/ebcdic.h>
@@ -369,6 +369,11 @@ dasd_state_ready_to_online(struct dasd_device * device)
         device->state = DASD_STATE_ONLINE;
         if (device->block) {
                 dasd_schedule_block_bh(device->block);
+                if ((device->features & DASD_FEATURE_USERAW)) {
+                        disk = device->block->gdp;
+                        kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+                        return 0;
+                }
                 disk = device->block->bdev->bd_disk;
                 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                 while ((part = disk_part_iter_next(&piter)))
@@ -394,7 +399,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
                 return rc;
         }
         device->state = DASD_STATE_READY;
-        if (device->block) {
+        if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
                 disk = device->block->bdev->bd_disk;
                 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                 while ((part = disk_part_iter_next(&piter)))
@@ -745,10 +750,6 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
         char *data;
         int size;
 
-        /* Sanity checks */
-        BUG_ON(datasize > PAGE_SIZE ||
-               (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
         size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
         if (cplength > 0)
                 size += cplength * sizeof(struct ccw1);
@@ -854,7 +855,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
         rc = ccw_device_clear(device->cdev, (long) cqr);
         switch (rc) {
         case 0: /* termination successful */
-                cqr->retries--;
                 cqr->status = DASD_CQR_CLEAR_PENDING;
                 cqr->stopclk = get_clock();
                 cqr->starttime = 0;
@@ -906,6 +906,16 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
                 return rc;
         }
         device = (struct dasd_device *) cqr->startdev;
+        if (((cqr->block &&
+              test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
+             test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
+            !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+                DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
+                              "because of stolen lock", cqr);
+                cqr->status = DASD_CQR_ERROR;
+                cqr->intrc = -EPERM;
+                return -EPERM;
+        }
         if (cqr->retries < 0) {
                 /* internal error 14 - start_IO run out of retries */
                 sprintf(errorstring, "14 %p", cqr);
@@ -917,6 +927,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
         cqr->startclk = get_clock();
         cqr->starttime = jiffies;
         cqr->retries--;
+        if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+                cqr->lpm &= device->path_data.opm;
+                if (!cqr->lpm)
+                        cqr->lpm = device->path_data.opm;
+        }
         if (cqr->cpmode == 1) {
                 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
                                          (long) cqr, cqr->lpm);
@@ -929,35 +944,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
                 cqr->status = DASD_CQR_IN_IO;
                 break;
         case -EBUSY:
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: device busy, retry later");
                 break;
         case -ETIMEDOUT:
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: request timeout, retry later");
                 break;
         case -EACCES:
-                /* -EACCES indicates that the request used only a
-                 * subset of the available pathes and all these
-                 * pathes are gone.
-                 * Do a retry with all available pathes.
+                /* -EACCES indicates that the request used only a subset of the
+                 * available paths and all these paths are gone. If the lpm of
+                 * this request was only a subset of the opm (e.g. the ppm) then
+                 * we just do a retry with all available paths.
+                 * If we already use the full opm, something is amiss, and we
+                 * need a full path verification.
                  */
-                cqr->lpm = LPM_ANYPATH;
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
-                              "start_IO: selected pathes gone,"
-                              " retry on all pathes");
+                if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+                        DBF_DEV_EVENT(DBF_WARNING, device,
+                                      "start_IO: selected paths gone (%x)",
+                                      cqr->lpm);
+                } else if (cqr->lpm != device->path_data.opm) {
+                        cqr->lpm = device->path_data.opm;
+                        DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                                      "start_IO: selected paths gone,"
+                                      " retry on all paths");
+                } else {
+                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                                      "start_IO: all paths in opm gone,"
+                                      " do path verification");
+                        dasd_generic_last_path_gone(device);
+                        device->path_data.opm = 0;
+                        device->path_data.ppm = 0;
+                        device->path_data.npm = 0;
+                        device->path_data.tbvpm =
+                                ccw_device_get_path_mask(device->cdev);
+                }
                 break;
         case -ENODEV:
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: -ENODEV device gone, retry");
                 break;
         case -EIO:
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: -EIO device gone, retry");
                 break;
         case -EINVAL:
                 /* most likely caused in power management context */
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: -EINVAL device currently "
                               "not accessible");
                 break;
@@ -1077,6 +1110,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
         unsigned long long now;
         int expires;
 
+        kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
         if (IS_ERR(irb)) {
                 switch (PTR_ERR(irb)) {
                 case -EIO:
@@ -1095,23 +1129,29 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
         }
 
         now = get_clock();
-
-        /* check for unsolicited interrupts */
         cqr = (struct dasd_ccw_req *) intparm;
-        if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
-                     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
-                     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
-                if (cqr && cqr->status == DASD_CQR_IN_IO)
-                        cqr->status = DASD_CQR_QUEUED;
+        /* check for conditions that should be handled immediately */
+        if (!cqr ||
+            !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+              scsw_cstat(&irb->scsw) == 0)) {
+                if (cqr)
+                        memcpy(&cqr->irb, irb, sizeof(*irb));
                 device = dasd_device_from_cdev_locked(cdev);
-                if (!IS_ERR(device)) {
-                        dasd_device_clear_timer(device);
-                        device->discipline->handle_unsolicited_interrupt(device,
-                                                                         irb);
-                        dasd_put_device(device);
+                if (IS_ERR(device))
+                        return;
+                /* ignore unsolicited interrupts for DIAG discipline */
+                if (device->discipline == dasd_diag_discipline_pointer) {
+                        dasd_put_device(device);
+                        return;
                 }
-                return;
+                device->discipline->dump_sense_dbf(device, irb, "int");
+                if (device->features & DASD_FEATURE_ERPLOG)
+                        device->discipline->dump_sense(device, cqr, irb);
+                device->discipline->check_for_device_change(device, cqr, irb);
+                dasd_put_device(device);
         }
+        if (!cqr)
+                return;
 
         device = (struct dasd_device *) cqr->startdev;
         if (!device ||
@@ -1151,25 +1191,19 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
                                           struct dasd_ccw_req, devlist);
                 }
         } else {  /* error */
-                memcpy(&cqr->irb, irb, sizeof(struct irb));
-                /* log sense for every failed I/O to s390 debugfeature */
-                dasd_log_sense_dbf(cqr, irb);
-                if (device->features & DASD_FEATURE_ERPLOG) {
-                        dasd_log_sense(cqr, irb);
-                }
-
                 /*
                  * If we don't want complex ERP for this request, then just
                  * reset this and retry it in the fastpath
                  */
                 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
                     cqr->retries > 0) {
-                        if (cqr->lpm == LPM_ANYPATH)
+                        if (cqr->lpm == device->path_data.opm)
                                 DBF_DEV_EVENT(DBF_DEBUG, device,
                                               "default ERP in fastpath "
                                               "(%i retries left)",
                                               cqr->retries);
-                        cqr->lpm = LPM_ANYPATH;
+                        if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+                                cqr->lpm = device->path_data.opm;
                         cqr->status = DASD_CQR_QUEUED;
                         next = cqr;
                 } else
@@ -1197,13 +1231,13 @@ enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
                 goto out;
         if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
             device->state != device->target ||
-            !device->discipline->handle_unsolicited_interrupt){
+            !device->discipline->check_for_device_change){
                 dasd_put_device(device);
                 goto out;
         }
-
-        dasd_device_clear_timer(device);
-        device->discipline->handle_unsolicited_interrupt(device, irb);
+        if (device->discipline->dump_sense_dbf)
+                device->discipline->dump_sense_dbf(device, irb, "uc");
+        device->discipline->check_for_device_change(device, NULL, irb);
         dasd_put_device(device);
 out:
         return UC_TODO_RETRY;
@@ -1353,8 +1387,14 @@ static void __dasd_device_start_head(struct dasd_device *device)
         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
         if (cqr->status != DASD_CQR_QUEUED)
                 return;
-        /* when device is stopped, return request to previous layer */
-        if (device->stopped) {
+        /* when device is stopped, return request to previous layer
+         * exception: only the disconnect or unresumed bits are set and the
+         * cqr is a path verification request
+         */
+        if (device->stopped &&
+            !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+              && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+                cqr->intrc = -EAGAIN;
                 cqr->status = DASD_CQR_CLEARED;
                 dasd_schedule_device_bh(device);
                 return;
@@ -1370,6 +1410,23 @@ static void __dasd_device_start_head(struct dasd_device *device)
         dasd_device_set_timer(device, 50);
 }
 
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+        int rc;
+
+        if (device->path_data.tbvpm) {
+                if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
+                                        DASD_UNRESUMED_PM))
+                        return;
+                rc = device->discipline->verify_path(
+                        device, device->path_data.tbvpm);
+                if (rc)
+                        dasd_device_set_timer(device, 50);
+                else
+                        device->path_data.tbvpm = 0;
+        }
+};
+
 /*
  * Go through all request on the dasd_device request queue,
  * terminate them on the cdev if necessary, and return them to the
@@ -1444,6 +1501,7 @@ static void dasd_device_tasklet(struct dasd_device *device)
         __dasd_device_check_expire(device);
         /* find final requests on ccw queue */
         __dasd_device_process_ccw_queue(device, &final_queue);
+        __dasd_device_check_path_events(device);
         spin_unlock_irq(get_ccwdev_lock(device->cdev));
         /* Now call the callback function of requests with final status */
         __dasd_device_process_final_queue(device, &final_queue);
@@ -1600,7 +1658,12 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
                         continue;
                 if (cqr->status != DASD_CQR_FILLED) /* could be failed */
                         continue;
-
+                if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+                        cqr->status = DASD_CQR_FAILED;
+                        cqr->intrc = -EPERM;
+                        continue;
+                }
                 /* Non-temporary stop condition will trigger fail fast */
                 if (device->stopped & ~DASD_STOPPED_PENDING &&
                     test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -1608,7 +1671,6 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
                         cqr->status = DASD_CQR_FAILED;
                         continue;
                 }
-
                 /* Don't try to start requests if device is stopped */
                 if (interruptible) {
                         rc = wait_event_interruptible(
@@ -1680,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 static inline int _dasd_term_running_cqr(struct dasd_device *device)
 {
         struct dasd_ccw_req *cqr;
+        int rc;
 
         if (list_empty(&device->ccw_queue))
                 return 0;
         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
-        return device->discipline->term_IO(cqr);
+        rc = device->discipline->term_IO(cqr);
+        if (!rc)
+                /*
+                 * CQR terminated because a more important request is pending.
+                 * Undo decreasing of retry counter because this is
+                 * not an error case.
+                 */
+                cqr->retries++;
+        return rc;
 }
 
 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
@@ -1693,13 +1764,18 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
         int rc;
 
         device = cqr->startdev;
+        if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+            !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+                cqr->status = DASD_CQR_FAILED;
+                cqr->intrc = -EPERM;
+                return -EIO;
+        }
         spin_lock_irq(get_ccwdev_lock(device->cdev));
         rc = _dasd_term_running_cqr(device);
         if (rc) {
                 spin_unlock_irq(get_ccwdev_lock(device->cdev));
                 return rc;
         }
-
         cqr->callback = dasd_wakeup_cb;
         cqr->callback_data = DASD_SLEEPON_START_TAG;
         cqr->status = DASD_CQR_QUEUED;
@@ -1850,7 +1926,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                 return;
         }
         /* Now we try to fetch requests from the request queue */
-        while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
+        while ((req = blk_peek_request(queue))) {
                 if (basedev->features & DASD_FEATURE_READONLY &&
                     rq_data_dir(req) == WRITE) {
                         DBF_DEV_EVENT(DBF_ERR, basedev,
@@ -2003,6 +2079,13 @@ static void __dasd_block_start_head(struct dasd_block *block)
         list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
                 if (cqr->status != DASD_CQR_FILLED)
                         continue;
+                if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
+                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+                        cqr->status = DASD_CQR_FAILED;
+                        cqr->intrc = -EPERM;
+                        dasd_schedule_block_bh(block);
+                        continue;
+                }
                 /* Non-temporary stop condition will trigger fail fast */
                 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
                     test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -2188,8 +2271,20 @@ static void dasd_setup_queue(struct dasd_block *block)
 {
         int max;
 
-        blk_queue_logical_block_size(block->request_queue, block->bp_block);
-        max = block->base->discipline->max_blocks << block->s2b_shift;
+        if (block->base->features & DASD_FEATURE_USERAW) {
+                /*
+                 * the max_blocks value for raw_track access is 256
+                 * it is higher than the native ECKD value because we
+                 * only need one ccw per track
+                 * so the max_hw_sectors are
+                 * 2048 x 512B = 1024kB = 16 tracks
+                 */
+                max = 2048;
+        } else {
+                max = block->base->discipline->max_blocks << block->s2b_shift;
+        }
+        blk_queue_logical_block_size(block->request_queue,
+                                     block->bp_block);
         blk_queue_max_hw_sectors(block->request_queue, max);
         blk_queue_max_segments(block->request_queue, -1L);
         /* with page sized segments we can translate each segement into
@@ -2197,7 +2292,6 @@ static void dasd_setup_queue(struct dasd_block *block)
          */
         blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
         blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
-        blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
 }
 
 /*
@@ -2229,16 +2323,14 @@ static void dasd_flush_request_queue(struct dasd_block *block)
 
 static int dasd_open(struct block_device *bdev, fmode_t mode)
 {
-        struct dasd_block *block = bdev->bd_disk->private_data;
         struct dasd_device *base;
         int rc;
 
-        if (!block)
+        base = dasd_device_from_gendisk(bdev->bd_disk);
+        if (!base)
                 return -ENODEV;
 
-        lock_kernel();
-        base = block->base;
-        atomic_inc(&block->open_count);
+        atomic_inc(&base->block->open_count);
         if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
                 rc = -ENODEV;
                 goto unlock;
@@ -2271,25 +2363,28 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
                 goto out;
         }
 
-        unlock_kernel();
+        dasd_put_device(base);
         return 0;
 
 out:
         module_put(base->discipline->owner);
 unlock:
-        atomic_dec(&block->open_count);
-        unlock_kernel();
+        atomic_dec(&base->block->open_count);
+        dasd_put_device(base);
         return rc;
 }
 
 static int dasd_release(struct gendisk *disk, fmode_t mode)
 {
-        struct dasd_block *block = disk->private_data;
+        struct dasd_device *base;
+
+        base = dasd_device_from_gendisk(disk);
+        if (!base)
+                return -ENODEV;
 
-        lock_kernel();
-        atomic_dec(&block->open_count);
-        module_put(block->base->discipline->owner);
-        unlock_kernel();
+        atomic_dec(&base->block->open_count);
+        module_put(base->discipline->owner);
+        dasd_put_device(base);
         return 0;
 }
 
@@ -2298,20 +2393,20 @@ static int dasd_release(struct gendisk *disk, fmode_t mode)
  */
 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
-        struct dasd_block *block;
         struct dasd_device *base;
 
-        block = bdev->bd_disk->private_data;
-        if (!block)
+        base = dasd_device_from_gendisk(bdev->bd_disk);
+        if (!base)
                 return -ENODEV;
-        base = block->base;
 
         if (!base->discipline ||
-            !base->discipline->fill_geometry)
+            !base->discipline->fill_geometry) {
+                dasd_put_device(base);
                 return -EINVAL;
-
-        base->discipline->fill_geometry(block, geo);
-        geo->start = get_start_sect(bdev) >> block->s2b_shift;
+        }
+        base->discipline->fill_geometry(base->block, geo);
+        geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+        dasd_put_device(base);
         return 0;
 }
 
@@ -2448,7 +2543,6 @@ void dasd_generic_remove(struct ccw_device *cdev)
         dasd_set_target_state(device, DASD_STATE_NEW);
         /* dasd_delete_device destroys the device reference. */
         block = device->block;
-        device->block = NULL;
         dasd_delete_device(device);
         /*
          * life cycle of block is bound to device, so delete it after
@@ -2570,7 +2664,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
         dasd_set_target_state(device, DASD_STATE_NEW);
         /* dasd_delete_device destroys the device reference. */
         block = device->block;
-        device->block = NULL;
         dasd_delete_device(device);
         /*
          * life cycle of block is bound to device, so delete it after
@@ -2581,10 +2674,53 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
         return 0;
 }
 
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+        struct dasd_ccw_req *cqr;
+
+        dev_warn(&device->cdev->dev, "No operational channel path is left "
+                 "for the device\n");
+        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+        /* First of all call extended error reporting. */
+        dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+        if (device->state < DASD_STATE_BASIC)
+                return 0;
+        /* Device is active. We want to keep it. */
+        list_for_each_entry(cqr, &device->ccw_queue, devlist)
+                if ((cqr->status == DASD_CQR_IN_IO) ||
+                    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+                        cqr->status = DASD_CQR_QUEUED;
+                        cqr->retries++;
+                }
+        dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+        dasd_device_clear_timer(device);
+        dasd_schedule_device_bh(device);
+        return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+        dev_info(&device->cdev->dev, "A channel path to the device has become "
+                 "operational\n");
+        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+        dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+        if (device->stopped & DASD_UNRESUMED_PM) {
+                dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+                dasd_restore_device(device);
+                return 1;
+        }
+        dasd_schedule_device_bh(device);
+        if (device->block)
+                dasd_schedule_block_bh(device->block);
+        return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
 int dasd_generic_notify(struct ccw_device *cdev, int event)
 {
         struct dasd_device *device;
-        struct dasd_ccw_req *cqr;
         int ret;
 
         device = dasd_device_from_cdev_locked(cdev);
@@ -2595,41 +2731,64 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
         case CIO_GONE:
         case CIO_BOXED:
         case CIO_NO_PATH:
-                /* First of all call extended error reporting. */
-                dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-
-                if (device->state < DASD_STATE_BASIC)
-                        break;
-                /* Device is active. We want to keep it. */
-                list_for_each_entry(cqr, &device->ccw_queue, devlist)
-                        if (cqr->status == DASD_CQR_IN_IO) {
-                                cqr->status = DASD_CQR_QUEUED;
-                                cqr->retries++;
-                        }
-                dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
-                dasd_device_clear_timer(device);
-                dasd_schedule_device_bh(device);
-                ret = 1;
+                device->path_data.opm = 0;
+                device->path_data.ppm = 0;
+                device->path_data.npm = 0;
+                ret = dasd_generic_last_path_gone(device);
                 break;
         case CIO_OPER:
-                /* FIXME: add a sanity check. */
-                dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
-                if (device->stopped & DASD_UNRESUMED_PM) {
-                        dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
-                        dasd_restore_device(device);
-                        ret = 1;
-                        break;
-                }
-                dasd_schedule_device_bh(device);
-                if (device->block)
-                        dasd_schedule_block_bh(device->block);
                 ret = 1;
+                if (device->path_data.opm)
+                        ret = dasd_generic_path_operational(device);
                 break;
         }
         dasd_put_device(device);
         return ret;
 }
 
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+        int chp;
+        __u8 oldopm, eventlpm;
+        struct dasd_device *device;
+
+        device = dasd_device_from_cdev_locked(cdev);
+        if (IS_ERR(device))
+                return;
+        for (chp = 0; chp < 8; chp++) {
+                eventlpm = 0x80 >> chp;
+                if (path_event[chp] & PE_PATH_GONE) {
+                        oldopm = device->path_data.opm;
+                        device->path_data.opm &= ~eventlpm;
+                        device->path_data.ppm &= ~eventlpm;
+                        device->path_data.npm &= ~eventlpm;
+                        if (oldopm && !device->path_data.opm)
+                                dasd_generic_last_path_gone(device);
+                }
+                if (path_event[chp] & PE_PATH_AVAILABLE) {
+                        device->path_data.opm &= ~eventlpm;
+                        device->path_data.ppm &= ~eventlpm;
+                        device->path_data.npm &= ~eventlpm;
+                        device->path_data.tbvpm |= eventlpm;
+                        dasd_schedule_device_bh(device);
+                }
+        }
+        dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+        if (!device->path_data.opm && lpm) {
+                device->path_data.opm = lpm;
+                dasd_generic_path_operational(device);
+        } else
+                device->path_data.opm |= lpm;
+        return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
+
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
         struct dasd_ccw_req *cqr, *n;
@@ -2639,6 +2798,10 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 
         if (IS_ERR(device))
                 return PTR_ERR(device);
+
+        if (device->discipline->freeze)
+                rc = device->discipline->freeze(device);
+
         /* disallow new I/O */
         dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
         /* clear active requests */
@@ -2675,9 +2838,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
         list_splice_tail(&freeze_queue, &device->ccw_queue);
         spin_unlock_irq(get_ccwdev_lock(cdev));
 
-        if (device->discipline->freeze)
-                rc = device->discipline->freeze(device);
-
         dasd_put_device(device);
         return rc;
 }
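
Aside: the new path handling in dasd.c above (dasd_generic_path_event, dasd_generic_verify_path and the reworked -EACCES branch in dasd_start_IO) all manipulates 8-bit logical path masks in which bit 0x80 >> chp stands for channel path chp. The following stand-alone C program is a minimal user-space sketch of that bookkeeping — illustrative only, not code from this commit; the struct and function names are assumptions. A path reported gone is cleared from the operational (opm), preferred (ppm) and non-preferred (npm) masks; a path that reappears is only queued for verification (tbvpm), never trusted directly.

/* Sketch, not from the kernel tree: mirrors the opm/ppm/npm/tbvpm logic. */
#include <stdint.h>
#include <stdio.h>

struct path_data {
        uint8_t opm;    /* operational path mask */
        uint8_t ppm;    /* preferred path mask */
        uint8_t npm;    /* non-preferred path mask */
        uint8_t tbvpm;  /* "to be verified" path mask */
};

static void path_gone(struct path_data *pd, int chp)
{
        uint8_t lpm = 0x80 >> chp;
        uint8_t oldopm = pd->opm;

        pd->opm &= ~lpm;
        pd->ppm &= ~lpm;
        pd->npm &= ~lpm;
        if (oldopm && !pd->opm)
                printf("last path gone\n");     /* dasd_generic_last_path_gone */
}

static void path_available(struct path_data *pd, int chp)
{
        uint8_t lpm = 0x80 >> chp;

        /* a reappearing path is not trusted, only queued for verification */
        pd->opm &= ~lpm;
        pd->ppm &= ~lpm;
        pd->npm &= ~lpm;
        pd->tbvpm |= lpm;
}

int main(void)
{
        struct path_data pd = { .opm = 0xc0 };  /* paths 0 and 1 operational */

        path_gone(&pd, 0);
        path_available(&pd, 2);
        printf("opm=%02x tbvpm=%02x\n", pd.opm, pd.tbvpm);
        return 0;
}

In the driver itself the verification step is then driven from the device tasklet via the new __dasd_device_check_path_events() shown earlier in this diff.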
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 85bfd8794856..87a0cf160fe5 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
         opm = ccw_device_get_path_mask(device->cdev);
         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-        //FIXME: start with get_opm ?
         if (erp->lpm == 0)
-                erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
+                erp->lpm = device->path_data.opm &
+                        ~(erp->irb.esw.esw0.sublog.lpum);
         else
                 erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
 
@@ -221,6 +221,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
         ccw->cmd_code = CCW_CMD_DCTL;
         ccw->count = 4;
         ccw->cda = (__u32)(addr_t) DCTL_data;
+        dctl_cqr->flags = erp->flags;
         dctl_cqr->function = dasd_3990_erp_DCTL;
         dctl_cqr->refers = erp;
         dctl_cqr->startdev = device;
@@ -269,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
 {
         erp->function = dasd_3990_erp_action_1;
         dasd_3990_erp_alternate_path(erp);
-        if (erp->status == DASD_CQR_FAILED) {
+        if (erp->status == DASD_CQR_FAILED &&
+            !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
                 erp->status = DASD_CQR_FILLED;
                 erp->retries = 10;
-                erp->lpm = LPM_ANYPATH;
+                erp->lpm = erp->startdev->path_data.opm;
                 erp->function = dasd_3990_erp_action_1_sec;
         }
         return erp;
@@ -1710,6 +1712,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
         ccw->cda = cpa;
 
         /* fill erp related fields */
+        erp->flags = default_erp->flags;
         erp->function = dasd_3990_erp_action_1B_32;
         erp->refers = default_erp->refers;
         erp->startdev = device;
@@ -1905,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
 static void
 dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 {
-
         if (sense[25] & DASD_SENSE_BIT_3) {
                 dasd_3990_erp_alternate_path(erp);
 
-                if (erp->status == DASD_CQR_FAILED) {
+                if (erp->status == DASD_CQR_FAILED &&
+                    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
                         /* reset the lpm and the status to be able to
                          * try further actions. */
-
-                        erp->lpm = 0;
+                        erp->lpm = erp->startdev->path_data.opm;
                         erp->status = DASD_CQR_NEED_ERP;
                 }
         }
@@ -2197,7 +2199,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
 
 /*
  *****************************************************************************
- * main ERP control fuctions (24 and 32 byte sense)
+ * main ERP control functions (24 and 32 byte sense)
  *****************************************************************************
  */
 
@@ -2205,7 +2207,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
  * DASD_3990_ERP_CONTROL_CHECK
  *
  * DESCRIPTION
- *   Does a generic inspection if a control check occured and sets up
+ *   Does a generic inspection if a control check occurred and sets up
  *   the related error recovery procedure
  *
  * PARAMETER
@@ -2248,7 +2250,7 @@ dasd_3990_erp_inspect(struct dasd_ccw_req *erp)
         struct dasd_ccw_req *erp_new = NULL;
         char *sense;
 
-        /* if this problem occured on an alias retry on base */
+        /* if this problem occurred on an alias retry on base */
         erp_new = dasd_3990_erp_inspect_alias(erp);
         if (erp_new)
                 return erp_new;
@@ -2280,7 +2282,7 @@ dasd_3990_erp_inspect(struct dasd_ccw_req *erp)
  * DASD_3990_ERP_ADD_ERP
  *
  * DESCRIPTION
- *   This funtion adds an additional request block (ERP) to the head of
+ *   This function adds an additional request block (ERP) to the head of
  *   the given cqr (or erp).
  *   For a command mode cqr the erp is initialized as an default erp
  *   (retry TIC).
@@ -2354,6 +2356,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
                 ccw->cda = (long)(cqr->cpaddr);
         }
 
+        erp->flags = cqr->flags;
         erp->function = dasd_3990_erp_add_erp;
         erp->refers = cqr;
         erp->startdev = device;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 4155805dcdff..c388eda1e2b1 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -253,13 +253,11 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
  */
 void dasd_alias_lcu_setup_complete(struct dasd_device *device)
 {
-        struct dasd_eckd_private *private;
         unsigned long flags;
         struct alias_server *server;
         struct alias_lcu *lcu;
         struct dasd_uid uid;
 
-        private = (struct dasd_eckd_private *) device->private;
         device->discipline->get_uid(device, &uid);
         lcu = NULL;
         spin_lock_irqsave(&aliastree.lock, flags);
@@ -279,13 +277,11 @@ void dasd_alias_lcu_setup_complete(struct dasd_device *device)
 
 void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
 {
-        struct dasd_eckd_private *private;
         unsigned long flags;
         struct alias_server *server;
         struct alias_lcu *lcu;
         struct dasd_uid uid;
 
-        private = (struct dasd_eckd_private *) device->private;
         device->discipline->get_uid(device, &uid);
         lcu = NULL;
         spin_lock_irqsave(&aliastree.lock, flags);
@@ -319,6 +315,9 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
 
         private = (struct dasd_eckd_private *) device->private;
         lcu = private->lcu;
+        /* nothing to do if already disconnected */
+        if (!lcu)
+                return;
         device->discipline->get_uid(device, &uid);
         spin_lock_irqsave(&lcu->lock, flags);
         list_del_init(&device->alias_list);
@@ -680,6 +679,9 @@ int dasd_alias_remove_device(struct dasd_device *device)
 
         private = (struct dasd_eckd_private *) device->private;
         lcu = private->lcu;
+        /* nothing to do if already removed */
+        if (!lcu)
+                return 0;
         spin_lock_irqsave(&lcu->lock, flags);
         _remove_device_from_lcu(lcu, device);
         spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 8d41f3ed38d7..d71511c7850a 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -208,6 +208,8 @@ dasd_feature_list(char *str, char **endp)
                 features |= DASD_FEATURE_READONLY;
         else if (len == 4 && !strncmp(str, "diag", 4))
                 features |= DASD_FEATURE_USEDIAG;
+        else if (len == 3 && !strncmp(str, "raw", 3))
+                features |= DASD_FEATURE_USERAW;
         else if (len == 6 && !strncmp(str, "erplog", 6))
                 features |= DASD_FEATURE_ERPLOG;
         else if (len == 8 && !strncmp(str, "failfast", 8))
@@ -300,7 +302,7 @@ dasd_parse_keyword( char *parsestring ) {
 /*
  * Try to interprete the first element on the comma separated parse string
  * as a device number or a range of devices. If the interpretation is
- * successfull, create the matching dasd_devmap entries and return a pointer
+ * successful, create the matching dasd_devmap entries and return a pointer
  * to the residual string.
  * If interpretation fails or in case of an error, return an error code.
  */
@@ -639,6 +641,7 @@ dasd_put_device_wake(struct dasd_device *device)
 {
         wake_up(&dasd_delete_wq);
 }
+EXPORT_SYMBOL_GPL(dasd_put_device_wake);
 
 /*
  * Return dasd_device structure associated with cdev.
@@ -671,6 +674,36 @@ dasd_device_from_cdev(struct ccw_device *cdev)
         return device;
 }
 
+void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device)
+{
+        struct dasd_devmap *devmap;
+
+        devmap = dasd_find_busid(dev_name(&device->cdev->dev));
+        if (IS_ERR(devmap))
+                return;
+        spin_lock(&dasd_devmap_lock);
+        gdp->private_data = devmap;
+        spin_unlock(&dasd_devmap_lock);
+}
+
+struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp)
+{
+        struct dasd_device *device;
+        struct dasd_devmap *devmap;
+
+        if (!gdp->private_data)
+                return NULL;
+        device = NULL;
+        spin_lock(&dasd_devmap_lock);
+        devmap = gdp->private_data;
+        if (devmap && devmap->device) {
+                device = devmap->device;
+                dasd_get_device(device);
+        }
+        spin_unlock(&dasd_devmap_lock);
+        return device;
+}
+
 /*
  * SECTION: files in sysfs
  */
@@ -856,7 +889,7 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
         spin_lock(&dasd_devmap_lock);
         /* Changing diag discipline flag is only allowed in offline state. */
         rc = count;
-        if (!devmap->device) {
+        if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
                 if (val)
                         devmap->features |= DASD_FEATURE_USEDIAG;
                 else
@@ -869,6 +902,56 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
 
+/*
+ * use_raw controls whether the driver should give access to raw eckd data or
+ * operate in standard mode
+ */
+static ssize_t
+dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+        struct dasd_devmap *devmap;
+        int use_raw;
+
+        devmap = dasd_find_busid(dev_name(dev));
+        if (!IS_ERR(devmap))
+                use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
+        else
+                use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
+        return sprintf(buf, use_raw ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
+                   const char *buf, size_t count)
+{
+        struct dasd_devmap *devmap;
+        ssize_t rc;
+        unsigned long val;
+
+        devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+        if (IS_ERR(devmap))
+                return PTR_ERR(devmap);
+
+        if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
+                return -EINVAL;
+
+        spin_lock(&dasd_devmap_lock);
+        /* Changing diag discipline flag is only allowed in offline state. */
+        rc = count;
+        if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
+                if (val)
+                        devmap->features |= DASD_FEATURE_USERAW;
+                else
+                        devmap->features &= ~DASD_FEATURE_USERAW;
+        } else
+                rc = -EPERM;
+        spin_unlock(&dasd_devmap_lock);
+        return rc;
+}
+
+static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
+                   dasd_use_raw_store);
+
 static ssize_t
 dasd_discipline_show(struct device *dev, struct device_attribute *attr,
                      char *buf)
@@ -1126,6 +1209,103 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
1126 1209
1127static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store); 1210static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
1128 1211
1212static ssize_t dasd_reservation_policy_show(struct device *dev,
1213 struct device_attribute *attr,
1214 char *buf)
1215{
1216 struct dasd_devmap *devmap;
1217 int rc = 0;
1218
1219 devmap = dasd_find_busid(dev_name(dev));
1220 if (IS_ERR(devmap)) {
1221 rc = snprintf(buf, PAGE_SIZE, "ignore\n");
1222 } else {
1223 spin_lock(&dasd_devmap_lock);
1224 if (devmap->features & DASD_FEATURE_FAILONSLCK)
1225 rc = snprintf(buf, PAGE_SIZE, "fail\n");
1226 else
1227 rc = snprintf(buf, PAGE_SIZE, "ignore\n");
1228 spin_unlock(&dasd_devmap_lock);
1229 }
1230 return rc;
1231}
1232
1233static ssize_t dasd_reservation_policy_store(struct device *dev,
1234 struct device_attribute *attr,
1235 const char *buf, size_t count)
1236{
1237 struct dasd_devmap *devmap;
1238 int rc;
1239
1240 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
1241 if (IS_ERR(devmap))
1242 return PTR_ERR(devmap);
1243 rc = 0;
1244 spin_lock(&dasd_devmap_lock);
1245 if (sysfs_streq("ignore", buf))
1246 devmap->features &= ~DASD_FEATURE_FAILONSLCK;
1247 else if (sysfs_streq("fail", buf))
1248 devmap->features |= DASD_FEATURE_FAILONSLCK;
1249 else
1250 rc = -EINVAL;
1251 if (devmap->device)
1252 devmap->device->features = devmap->features;
1253 spin_unlock(&dasd_devmap_lock);
1254 if (rc)
1255 return rc;
1256 else
1257 return count;
1258}
1259
1260static DEVICE_ATTR(reservation_policy, 0644,
1261 dasd_reservation_policy_show, dasd_reservation_policy_store);
1262
1263static ssize_t dasd_reservation_state_show(struct device *dev,
1264 struct device_attribute *attr,
1265 char *buf)
1266{
1267 struct dasd_device *device;
1268 int rc = 0;
1269
1270 device = dasd_device_from_cdev(to_ccwdev(dev));
1271 if (IS_ERR(device))
1272 return snprintf(buf, PAGE_SIZE, "none\n");
1273
1274 if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
1275 rc = snprintf(buf, PAGE_SIZE, "reserved\n");
1276 else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
1277 rc = snprintf(buf, PAGE_SIZE, "lost\n");
1278 else
1279 rc = snprintf(buf, PAGE_SIZE, "none\n");
1280 dasd_put_device(device);
1281 return rc;
1282}
1283
1284static ssize_t dasd_reservation_state_store(struct device *dev,
1285 struct device_attribute *attr,
1286 const char *buf, size_t count)
1287{
1288 struct dasd_device *device;
1289 int rc = 0;
1290
1291 device = dasd_device_from_cdev(to_ccwdev(dev));
1292 if (IS_ERR(device))
1293 return -ENODEV;
1294 if (sysfs_streq("reset", buf))
1295 clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
1296 else
1297 rc = -EINVAL;
1298 dasd_put_device(device);
1299
1300 if (rc)
1301 return rc;
1302 else
1303 return count;
1304}
1305
1306static DEVICE_ATTR(last_known_reservation_state, 0644,
1307 dasd_reservation_state_show, dasd_reservation_state_store);
1308
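Taken together, the two attributes work as follows: reservation_policy selects whether I/O after a stolen reservation fails ("fail", i.e. DASD_FEATURE_FAILONSLCK) or is retried ("ignore"), and last_known_reservation_state reports none/reserved/lost, where "lost" is only cleared again by writing "reset". A hedged userspace sketch (paths assumed to mirror the attribute names above):

	#include <stdio.h>
	#include <string.h>

	/* After an outage, check whether this host's reservation was stolen
	 * and acknowledge it; writing "reset" clears DASD_FLAG_LOCK_STOLEN. */
	static void ack_lost_reservation(const char *sysfs_dir)
	{
		char path[160], state[16] = "";
		FILE *f;

		snprintf(path, sizeof(path),
			 "%s/last_known_reservation_state", sysfs_dir);
		f = fopen(path, "r");
		if (!f)
			return;
		fgets(state, sizeof(state), f);
		fclose(f);
		if (!strncmp(state, "lost", 4)) {
			f = fopen(path, "w");
			if (f) {
				fputs("reset", f);
				fclose(f);
			}
		}
	}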
1129static struct attribute * dasd_attrs[] = { 1309static struct attribute * dasd_attrs[] = {
1130 &dev_attr_readonly.attr, 1310 &dev_attr_readonly.attr,
1131 &dev_attr_discipline.attr, 1311 &dev_attr_discipline.attr,
@@ -1134,10 +1314,13 @@ static struct attribute * dasd_attrs[] = {
1134 &dev_attr_vendor.attr, 1314 &dev_attr_vendor.attr,
1135 &dev_attr_uid.attr, 1315 &dev_attr_uid.attr,
1136 &dev_attr_use_diag.attr, 1316 &dev_attr_use_diag.attr,
1317 &dev_attr_raw_track_access.attr,
1137 &dev_attr_eer_enabled.attr, 1318 &dev_attr_eer_enabled.attr,
1138 &dev_attr_erplog.attr, 1319 &dev_attr_erplog.attr,
1139 &dev_attr_failfast.attr, 1320 &dev_attr_failfast.attr,
1140 &dev_attr_expires.attr, 1321 &dev_attr_expires.attr,
1322 &dev_attr_reservation_policy.attr,
1323 &dev_attr_last_known_reservation_state.attr,
1141 NULL, 1324 NULL,
1142}; 1325};
1143 1326
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 2b3bc3ec0541..46784b83c5c4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -10,6 +10,7 @@
10 10
11#define KMSG_COMPONENT "dasd" 11#define KMSG_COMPONENT "dasd"
12 12
13#include <linux/kernel_stat.h>
13#include <linux/stddef.h> 14#include <linux/stddef.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
@@ -23,7 +24,7 @@
23#include <asm/debug.h> 24#include <asm/debug.h>
24#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/s390_ext.h> 27#include <asm/irq.h>
27#include <asm/vtoc.h> 28#include <asm/vtoc.h>
28#include <asm/diag.h> 29#include <asm/diag.h>
29 30
@@ -228,29 +229,27 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
228} 229}
229 230
230/* Handle external interruption. */ 231/* Handle external interruption. */
231static void 232static void dasd_ext_handler(unsigned int ext_int_code,
232dasd_ext_handler(__u16 code) 233 unsigned int param32, unsigned long param64)
233{ 234{
234 struct dasd_ccw_req *cqr, *next; 235 struct dasd_ccw_req *cqr, *next;
235 struct dasd_device *device; 236 struct dasd_device *device;
236 unsigned long long expires; 237 unsigned long long expires;
237 unsigned long flags; 238 unsigned long flags;
238 u8 int_code, status;
239 addr_t ip; 239 addr_t ip;
240 int rc; 240 int rc;
241 241
242 int_code = *((u8 *) DASD_DIAG_LC_INT_CODE); 242 switch (ext_int_code >> 24) {
243 status = *((u8 *) DASD_DIAG_LC_INT_STATUS);
244 switch (int_code) {
245 case DASD_DIAG_CODE_31BIT: 243 case DASD_DIAG_CODE_31BIT:
246 ip = (addr_t) *((u32 *) DASD_DIAG_LC_INT_PARM_31BIT); 244 ip = (addr_t) param32;
247 break; 245 break;
248 case DASD_DIAG_CODE_64BIT: 246 case DASD_DIAG_CODE_64BIT:
249 ip = (addr_t) *((u64 *) DASD_DIAG_LC_INT_PARM_64BIT); 247 ip = (addr_t) param64;
250 break; 248 break;
251 default: 249 default:
252 return; 250 return;
253 } 251 }
252 kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
254 if (!ip) { /* no intparm: unsolicited interrupt */ 253 if (!ip) { /* no intparm: unsolicited interrupt */
255 DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited " 254 DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
256 "interrupt"); 255 "interrupt");
@@ -281,7 +280,7 @@ dasd_ext_handler(__u16 code)
281 cqr->stopclk = get_clock(); 280 cqr->stopclk = get_clock();
282 281
283 expires = 0; 282 expires = 0;
284 if (status == 0) { 283 if ((ext_int_code & 0xff0000) == 0) {
285 cqr->status = DASD_CQR_SUCCESS; 284 cqr->status = DASD_CQR_SUCCESS;
286 /* Start first request on queue if possible -> fast_io. */ 285 /* Start first request on queue if possible -> fast_io. */
287 if (!list_empty(&device->ccw_queue)) { 286 if (!list_empty(&device->ccw_queue)) {
@@ -296,8 +295,8 @@ dasd_ext_handler(__u16 code)
296 } else { 295 } else {
297 cqr->status = DASD_CQR_QUEUED; 296 cqr->status = DASD_CQR_QUEUED;
298 DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for " 297 DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
299 "request %p was %d (%d retries left)", cqr, status, 298 "request %p was %d (%d retries left)", cqr,
300 cqr->retries); 299 (ext_int_code >> 16) & 0xff, cqr->retries);
301 dasd_diag_erp(device); 300 dasd_diag_erp(device);
302 } 301 }
303 302
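The rewritten handler no longer peeks at fixed lowcore offsets: the interrupt code and status now arrive packed into ext_int_code, with the code in bits 31-24 and the status byte in bits 23-16, as the shifts above imply. A sketch of the decoding (editor's illustration, not in the patch):

	static inline u8 dasd_diag_int_code(unsigned int ext_int_code)
	{
		return ext_int_code >> 24;	/* replaces *(u8 *) 132 in lowcore */
	}

	static inline u8 dasd_diag_int_status(unsigned int ext_int_code)
	{
		return (ext_int_code >> 16) & 0xff;	/* replaces *(u8 *) 133 */
	}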
@@ -620,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = {
620 .ebcname = "DIAG", 619 .ebcname = "DIAG",
621 .max_blocks = DIAG_MAX_BLOCKS, 620 .max_blocks = DIAG_MAX_BLOCKS,
622 .check_device = dasd_diag_check_device, 621 .check_device = dasd_diag_check_device,
622 .verify_path = dasd_generic_verify_path,
623 .fill_geometry = dasd_diag_fill_geometry, 623 .fill_geometry = dasd_diag_fill_geometry,
624 .start_IO = dasd_start_diag, 624 .start_IO = dasd_start_diag,
625 .term_IO = dasd_diag_term_IO, 625 .term_IO = dasd_diag_term_IO,
@@ -642,7 +642,7 @@ dasd_diag_init(void)
642 } 642 }
643 ASCEBC(dasd_diag_discipline.ebcname, 4); 643 ASCEBC(dasd_diag_discipline.ebcname, 4);
644 644
645 ctl_set_bit(0, 9); 645 service_subclass_irq_register();
646 register_external_interrupt(0x2603, dasd_ext_handler); 646 register_external_interrupt(0x2603, dasd_ext_handler);
647 dasd_diag_discipline_pointer = &dasd_diag_discipline; 647 dasd_diag_discipline_pointer = &dasd_diag_discipline;
648 return 0; 648 return 0;
@@ -652,7 +652,7 @@ static void __exit
652dasd_diag_cleanup(void) 652dasd_diag_cleanup(void)
653{ 653{
654 unregister_external_interrupt(0x2603, dasd_ext_handler); 654 unregister_external_interrupt(0x2603, dasd_ext_handler);
655 ctl_clear_bit(0, 9); 655 service_subclass_irq_unregister();
656 dasd_diag_discipline_pointer = NULL; 656 dasd_diag_discipline_pointer = NULL;
657} 657}
658 658
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index b8c78267ff3e..4f71fbe60c82 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -18,10 +18,6 @@
18#define DEV_CLASS_FBA 0x01 18#define DEV_CLASS_FBA 0x01
19#define DEV_CLASS_ECKD 0x04 19#define DEV_CLASS_ECKD 0x04
20 20
21#define DASD_DIAG_LC_INT_CODE 132
22#define DASD_DIAG_LC_INT_STATUS 133
23#define DASD_DIAG_LC_INT_PARM_31BIT 128
24#define DASD_DIAG_LC_INT_PARM_64BIT 4536
25#define DASD_DIAG_CODE_31BIT 0x03 21#define DASD_DIAG_CODE_31BIT 0x03
26#define DASD_DIAG_CODE_64BIT 0x07 22#define DASD_DIAG_CODE_64BIT 0x07
27 23
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 66360c24bd48..30fb979d684d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -54,6 +54,15 @@
54#define ECKD_F7(i) (i->factor7) 54#define ECKD_F7(i) (i->factor7)
55#define ECKD_F8(i) (i->factor8) 55#define ECKD_F8(i) (i->factor8)
56 56
57/*
 58 * raw track access always maps to 64k in memory
59 * so it maps to 16 blocks of 4k per track
60 */
61#define DASD_RAW_BLOCK_PER_TRACK 16
62#define DASD_RAW_BLOCKSIZE 4096
63/* 64k are 128 x 512 byte sectors */
64#define DASD_RAW_SECTORS_PER_TRACK 128
65
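The three constants are mutually consistent: 16 blocks of 4k and 128 sectors of 512 bytes both describe the same 64k track image. A compile-time sketch of that invariant (editor's illustration, not in the patch):

	static inline void dasd_raw_geometry_check(void)
	{
		BUILD_BUG_ON(DASD_RAW_BLOCK_PER_TRACK * DASD_RAW_BLOCKSIZE != 65536);
		BUILD_BUG_ON(DASD_RAW_SECTORS_PER_TRACK * 512 != 65536);
	}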
57MODULE_LICENSE("GPL"); 66MODULE_LICENSE("GPL");
58 67
59static struct dasd_discipline dasd_eckd_discipline; 68static struct dasd_discipline dasd_eckd_discipline;
@@ -63,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
63static struct ccw_device_id dasd_eckd_ids[] = { 72static struct ccw_device_id dasd_eckd_ids[] = {
64 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, 73 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
65 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, 74 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
 66 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3}, 75 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
67 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, 76 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
68 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, 77 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
69 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, 78 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
@@ -90,6 +99,18 @@ static struct {
90} *dasd_reserve_req; 99} *dasd_reserve_req;
91static DEFINE_MUTEX(dasd_reserve_mutex); 100static DEFINE_MUTEX(dasd_reserve_mutex);
92 101
102/* definitions for the path verification worker */
103struct path_verification_work_data {
104 struct work_struct worker;
105 struct dasd_device *device;
106 struct dasd_ccw_req cqr;
107 struct ccw1 ccw;
108 __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
109 int isglobal;
110 __u8 tbvpm;
111};
112static struct path_verification_work_data *path_verification_worker;
113static DEFINE_MUTEX(dasd_path_verification_mutex);
93 114
94/* initial attempt at a probe function. this can be simplified once 115/* initial attempt at a probe function. this can be simplified once
95 * the other detection code is gone */ 116 * the other detection code is gone */
@@ -373,6 +394,23 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
373 data->length = reclen; 394 data->length = reclen;
374 data->operation.operation = 0x03; 395 data->operation.operation = 0x03;
375 break; 396 break;
397 case DASD_ECKD_CCW_WRITE_FULL_TRACK:
398 data->operation.orientation = 0x0;
399 data->operation.operation = 0x3F;
400 data->extended_operation = 0x11;
401 data->length = 0;
402 data->extended_parameter_length = 0x02;
403 if (data->count > 8) {
404 data->extended_parameter[0] = 0xFF;
405 data->extended_parameter[1] = 0xFF;
406 data->extended_parameter[1] <<= (16 - count);
407 } else {
408 data->extended_parameter[0] = 0xFF;
409 data->extended_parameter[0] <<= (8 - count);
410 data->extended_parameter[1] = 0x00;
411 }
412 data->sector = 0xFF;
413 break;
376 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 414 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
377 data->auxiliary.length_valid = 0x1; 415 data->auxiliary.length_valid = 0x1;
378 data->length = reclen; /* not tlf, as one might think */ 416 data->length = reclen; /* not tlf, as one might think */
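The two extended parameter bytes built for DASD_ECKD_CCW_WRITE_FULL_TRACK appear to form a per-track bitmask, most significant bit first, across up to 16 tracks. A worked sketch of the same computation (editor's illustration, assuming count is the number of tracks in the extent):

	static void fill_track_mask(u8 ep[2], unsigned int count)
	{
		if (count > 8) {
			ep[0] = 0xFF;			/* tracks 1-8 */
			/* truncation to u8 is intended: count = 12 -> 0xF0 */
			ep[1] = 0xFF << (16 - count);
		} else {
			ep[0] = 0xFF << (8 - count);	/* count = 3 -> 0xE0 */
			ep[1] = 0x00;
		}
	}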
@@ -396,6 +434,12 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
396 case DASD_ECKD_CCW_READ_COUNT: 434 case DASD_ECKD_CCW_READ_COUNT:
397 data->operation.operation = 0x06; 435 data->operation.operation = 0x06;
398 break; 436 break;
437 case DASD_ECKD_CCW_READ_TRACK:
438 data->operation.orientation = 0x1;
439 data->operation.operation = 0x0C;
440 data->extended_parameter_length = 0;
441 data->sector = 0xFF;
442 break;
399 case DASD_ECKD_CCW_READ_TRACK_DATA: 443 case DASD_ECKD_CCW_READ_TRACK_DATA:
400 data->auxiliary.length_valid = 0x1; 444 data->auxiliary.length_valid = 0x1;
401 data->length = tlf; 445 data->length = tlf;
@@ -439,10 +483,16 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
439 483
440 ccw->cmd_code = DASD_ECKD_CCW_PFX; 484 ccw->cmd_code = DASD_ECKD_CCW_PFX;
441 ccw->flags = 0; 485 ccw->flags = 0;
442 ccw->count = sizeof(*pfxdata); 486 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
443 ccw->cda = (__u32) __pa(pfxdata); 487 ccw->count = sizeof(*pfxdata) + 2;
488 ccw->cda = (__u32) __pa(pfxdata);
489 memset(pfxdata, 0, sizeof(*pfxdata) + 2);
490 } else {
491 ccw->count = sizeof(*pfxdata);
492 ccw->cda = (__u32) __pa(pfxdata);
493 memset(pfxdata, 0, sizeof(*pfxdata));
494 }
444 495
445 memset(pfxdata, 0, sizeof(*pfxdata));
446 /* prefix data */ 496 /* prefix data */
447 if (format > 1) { 497 if (format > 1) {
448 DBF_DEV_EVENT(DBF_ERR, basedev, 498 DBF_DEV_EVENT(DBF_ERR, basedev,
@@ -476,6 +526,7 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
476 dedata->mask.perm = 0x1; 526 dedata->mask.perm = 0x1;
477 dedata->attributes.operation = basepriv->attrib.operation; 527 dedata->attributes.operation = basepriv->attrib.operation;
478 break; 528 break;
529 case DASD_ECKD_CCW_READ_TRACK:
479 case DASD_ECKD_CCW_READ_TRACK_DATA: 530 case DASD_ECKD_CCW_READ_TRACK_DATA:
480 dedata->mask.perm = 0x1; 531 dedata->mask.perm = 0x1;
481 dedata->attributes.operation = basepriv->attrib.operation; 532 dedata->attributes.operation = basepriv->attrib.operation;
@@ -502,6 +553,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
502 dedata->attributes.operation = DASD_BYPASS_CACHE; 553 dedata->attributes.operation = DASD_BYPASS_CACHE;
503 rc = check_XRC_on_prefix(pfxdata, basedev); 554 rc = check_XRC_on_prefix(pfxdata, basedev);
504 break; 555 break;
556 case DASD_ECKD_CCW_WRITE_FULL_TRACK:
557 dedata->mask.perm = 0x03;
558 dedata->attributes.operation = basepriv->attrib.operation;
559 dedata->blk_size = 0;
560 break;
505 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 561 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
506 dedata->mask.perm = 0x02; 562 dedata->mask.perm = 0x02;
507 dedata->attributes.operation = basepriv->attrib.operation; 563 dedata->attributes.operation = basepriv->attrib.operation;
@@ -755,26 +811,27 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
755 return -EINVAL; 811 return -EINVAL;
756} 812}
757 813
758static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, 814static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
759 void *rcd_buffer, 815 struct dasd_ccw_req *cqr,
760 struct ciw *ciw, __u8 lpm) 816 __u8 *rcd_buffer,
817 __u8 lpm)
761{ 818{
762 struct dasd_ccw_req *cqr;
763 struct ccw1 *ccw; 819 struct ccw1 *ccw;
764 820 /*
765 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count, 821 * buffer has to start with EBCDIC "V1.0" to show
766 device); 822 * support for virtual device SNEQ
767 823 */
768 if (IS_ERR(cqr)) { 824 rcd_buffer[0] = 0xE5;
769 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 825 rcd_buffer[1] = 0xF1;
770 "Could not allocate RCD request"); 826 rcd_buffer[2] = 0x4B;
771 return cqr; 827 rcd_buffer[3] = 0xF0;
772 }
773 828
774 ccw = cqr->cpaddr; 829 ccw = cqr->cpaddr;
775 ccw->cmd_code = ciw->cmd; 830 ccw->cmd_code = DASD_ECKD_CCW_RCD;
831 ccw->flags = 0;
776 ccw->cda = (__u32)(addr_t)rcd_buffer; 832 ccw->cda = (__u32)(addr_t)rcd_buffer;
777 ccw->count = ciw->count; 833 ccw->count = DASD_ECKD_RCD_DATA_SIZE;
834 cqr->magic = DASD_ECKD_MAGIC;
778 835
779 cqr->startdev = device; 836 cqr->startdev = device;
780 cqr->memdev = device; 837 cqr->memdev = device;
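The four magic bytes written into the RCD buffer are simply "V1.0" in EBCDIC; spelling that out (editor's note, not in the patch):

	static const __u8 rcd_magic[] = {
		0xE5,	/* 'V' in EBCDIC */
		0xF1,	/* '1' */
		0x4B,	/* '.' */
		0xF0,	/* '0' */
	};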
@@ -784,7 +841,30 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
784 cqr->retries = 256; 841 cqr->retries = 256;
785 cqr->buildclk = get_clock(); 842 cqr->buildclk = get_clock();
786 cqr->status = DASD_CQR_FILLED; 843 cqr->status = DASD_CQR_FILLED;
787 return cqr; 844 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
845}
846
847static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
848 struct dasd_ccw_req *cqr,
849 __u8 *rcd_buffer,
850 __u8 lpm)
851{
852 struct ciw *ciw;
853 int rc;
854 /*
855 * sanity check: scan for RCD command in extended SenseID data
856 * some devices do not support RCD
857 */
858 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
859 if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
860 return -EOPNOTSUPP;
861
862 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
863 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
864 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
865 cqr->retries = 5;
866 rc = dasd_sleep_on_immediatly(cqr);
867 return rc;
788} 868}
789 869
790static int dasd_eckd_read_conf_lpm(struct dasd_device *device, 870static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
@@ -797,32 +877,29 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
797 struct dasd_ccw_req *cqr; 877 struct dasd_ccw_req *cqr;
798 878
799 /* 879 /*
800 * scan for RCD command in extended SenseID data 880 * sanity check: scan for RCD command in extended SenseID data
881 * some devices do not support RCD
801 */ 882 */
802 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD); 883 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
803 if (!ciw || ciw->cmd == 0) { 884 if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
804 ret = -EOPNOTSUPP; 885 ret = -EOPNOTSUPP;
805 goto out_error; 886 goto out_error;
806 } 887 }
807 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); 888 rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
808 if (!rcd_buf) { 889 if (!rcd_buf) {
809 ret = -ENOMEM; 890 ret = -ENOMEM;
810 goto out_error; 891 goto out_error;
811 } 892 }
812 893 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
 813 /* 894 0, /* use rcd_buf as data area */
814 * buffer has to start with EBCDIC "V1.0" to show 895 device);
815 * support for virtual device SNEQ
816 */
817 rcd_buf[0] = 0xE5;
818 rcd_buf[1] = 0xF1;
819 rcd_buf[2] = 0x4B;
820 rcd_buf[3] = 0xF0;
821 cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
822 if (IS_ERR(cqr)) { 896 if (IS_ERR(cqr)) {
823 ret = PTR_ERR(cqr); 897 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
898 "Could not allocate RCD request");
899 ret = -ENOMEM;
824 goto out_error; 900 goto out_error;
825 } 901 }
902 dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
826 ret = dasd_sleep_on(cqr); 903 ret = dasd_sleep_on(cqr);
827 /* 904 /*
828 * on success we update the user input parms 905 * on success we update the user input parms
@@ -831,7 +908,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
831 if (ret) 908 if (ret)
832 goto out_error; 909 goto out_error;
833 910
834 *rcd_buffer_size = ciw->count; 911 *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
835 *rcd_buffer = rcd_buf; 912 *rcd_buffer = rcd_buf;
836 return 0; 913 return 0;
837out_error: 914out_error:
@@ -901,18 +978,18 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
901 void *conf_data; 978 void *conf_data;
902 int conf_len, conf_data_saved; 979 int conf_len, conf_data_saved;
903 int rc; 980 int rc;
904 __u8 lpm; 981 __u8 lpm, opm;
905 struct dasd_eckd_private *private; 982 struct dasd_eckd_private *private;
906 struct dasd_eckd_path *path_data; 983 struct dasd_path *path_data;
907 984
908 private = (struct dasd_eckd_private *) device->private; 985 private = (struct dasd_eckd_private *) device->private;
909 path_data = (struct dasd_eckd_path *) &private->path_data; 986 path_data = &device->path_data;
910 path_data->opm = ccw_device_get_path_mask(device->cdev); 987 opm = ccw_device_get_path_mask(device->cdev);
911 lpm = 0x80; 988 lpm = 0x80;
912 conf_data_saved = 0; 989 conf_data_saved = 0;
913 /* get configuration data per operational path */ 990 /* get configuration data per operational path */
914 for (lpm = 0x80; lpm; lpm>>= 1) { 991 for (lpm = 0x80; lpm; lpm>>= 1) {
915 if (lpm & path_data->opm){ 992 if (lpm & opm) {
916 rc = dasd_eckd_read_conf_lpm(device, &conf_data, 993 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
917 &conf_len, lpm); 994 &conf_len, lpm);
918 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ 995 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
@@ -925,6 +1002,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
925 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 1002 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
926 "No configuration data " 1003 "No configuration data "
927 "retrieved"); 1004 "retrieved");
1005 /* no further analysis possible */
1006 path_data->opm |= lpm;
928 continue; /* no error */ 1007 continue; /* no error */
929 } 1008 }
930 /* save first valid configuration data */ 1009 /* save first valid configuration data */
@@ -948,6 +1027,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
948 path_data->ppm |= lpm; 1027 path_data->ppm |= lpm;
949 break; 1028 break;
950 } 1029 }
1030 path_data->opm |= lpm;
951 if (conf_data != private->conf_data) 1031 if (conf_data != private->conf_data)
952 kfree(conf_data); 1032 kfree(conf_data);
953 } 1033 }
@@ -955,6 +1035,140 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
955 return 0; 1035 return 0;
956} 1036}
957 1037
1038static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1039{
1040 struct dasd_eckd_private *private;
1041 int mdc;
1042 u32 fcx_max_data;
1043
1044 private = (struct dasd_eckd_private *) device->private;
1045 if (private->fcx_max_data) {
1046 mdc = ccw_device_get_mdc(device->cdev, lpm);
1047 if ((mdc < 0)) {
1048 dev_warn(&device->cdev->dev,
1049 "Detecting the maximum data size for zHPF "
1050 "requests failed (rc=%d) for a new path %x\n",
1051 mdc, lpm);
1052 return mdc;
1053 }
1054 fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
1055 if (fcx_max_data < private->fcx_max_data) {
1056 dev_warn(&device->cdev->dev,
1057 "The maximum data size for zHPF requests %u "
1058 "on a new path %x is below the active maximum "
1059 "%u\n", fcx_max_data, lpm,
1060 private->fcx_max_data);
1061 return -EACCES;
1062 }
1063 }
1064 return 0;
1065}
1066
1067static void do_path_verification_work(struct work_struct *work)
1068{
1069 struct path_verification_work_data *data;
1070 struct dasd_device *device;
1071 __u8 lpm, opm, npm, ppm, epm;
1072 unsigned long flags;
1073 int rc;
1074
1075 data = container_of(work, struct path_verification_work_data, worker);
1076 device = data->device;
1077
1078 opm = 0;
1079 npm = 0;
1080 ppm = 0;
1081 epm = 0;
1082 for (lpm = 0x80; lpm; lpm >>= 1) {
1083 if (lpm & data->tbvpm) {
1084 memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1085 memset(&data->cqr, 0, sizeof(data->cqr));
1086 data->cqr.cpaddr = &data->ccw;
1087 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1088 data->rcd_buffer,
1089 lpm);
1090 if (!rc) {
1091 switch (dasd_eckd_path_access(data->rcd_buffer,
1092 DASD_ECKD_RCD_DATA_SIZE)) {
1093 case 0x02:
1094 npm |= lpm;
1095 break;
1096 case 0x03:
1097 ppm |= lpm;
1098 break;
1099 }
1100 opm |= lpm;
1101 } else if (rc == -EOPNOTSUPP) {
1102 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1103 "path verification: No configuration "
1104 "data retrieved");
1105 opm |= lpm;
1106 } else if (rc == -EAGAIN) {
1107 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1108 "path verification: device is stopped,"
1109 " try again later");
1110 epm |= lpm;
1111 } else {
1112 dev_warn(&device->cdev->dev,
1113 "Reading device feature codes failed "
1114 "(rc=%d) for new path %x\n", rc, lpm);
1115 continue;
1116 }
1117 if (verify_fcx_max_data(device, lpm)) {
1118 opm &= ~lpm;
1119 npm &= ~lpm;
1120 ppm &= ~lpm;
1121 }
1122 }
1123 }
1124 /*
 1125 * There is a small chance that a path is lost again between
 1126 * the path verification above and the following modification of
 1127 * the device opm mask. We could avoid that race here by using
 1128 * yet another path mask, but we would rather deal with this unlikely
1129 * situation in dasd_start_IO.
1130 */
1131 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1132 if (!device->path_data.opm && opm) {
1133 device->path_data.opm = opm;
1134 dasd_generic_path_operational(device);
1135 } else
1136 device->path_data.opm |= opm;
1137 device->path_data.npm |= npm;
1138 device->path_data.ppm |= ppm;
1139 device->path_data.tbvpm |= epm;
1140 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1141
1142 dasd_put_device(device);
1143 if (data->isglobal)
1144 mutex_unlock(&dasd_path_verification_mutex);
1145 else
1146 kfree(data);
1147}
1148
1149static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
1150{
1151 struct path_verification_work_data *data;
1152
1153 data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1154 if (!data) {
1155 if (mutex_trylock(&dasd_path_verification_mutex)) {
1156 data = path_verification_worker;
1157 data->isglobal = 1;
1158 } else
1159 return -ENOMEM;
1160 } else {
1161 memset(data, 0, sizeof(*data));
1162 data->isglobal = 0;
1163 }
1164 INIT_WORK(&data->worker, do_path_verification_work);
1165 dasd_get_device(device);
1166 data->device = device;
1167 data->tbvpm = lpm;
1168 schedule_work(&data->worker);
1169 return 0;
1170}
1171
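dasd_eckd_verify_path() can run when memory is tight, so it uses the same fallback pattern as dasd_reserve_req earlier in this file: try an atomic allocation, else fall back to a single statically preallocated object serialized via mutex_trylock(). The pattern in isolation (editor's sketch; the names are illustrative):

	struct pv_work { int isglobal; /* ... */ };
	static struct pv_work global_pv;
	static DEFINE_MUTEX(global_pv_mutex);

	static struct pv_work *get_pv_work(void)
	{
		struct pv_work *p = kmalloc(sizeof(*p), GFP_ATOMIC);

		if (p) {
			memset(p, 0, sizeof(*p));
			return p;		/* freed by the worker */
		}
		if (mutex_trylock(&global_pv_mutex)) {
			global_pv.isglobal = 1;	/* mutex dropped by the worker */
			return &global_pv;
		}
		return NULL;			/* caller reports -ENOMEM */
	}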
958static int dasd_eckd_read_features(struct dasd_device *device) 1172static int dasd_eckd_read_features(struct dasd_device *device)
959{ 1173{
960 struct dasd_psf_prssd_data *prssdp; 1174 struct dasd_psf_prssd_data *prssdp;
@@ -1105,6 +1319,37 @@ static void dasd_eckd_validate_server(struct dasd_device *device)
1105 "returned rc=%d", private->uid.ssid, rc); 1319 "returned rc=%d", private->uid.ssid, rc);
1106} 1320}
1107 1321
1322static u32 get_fcx_max_data(struct dasd_device *device)
1323{
1324#if defined(CONFIG_64BIT)
1325 int tpm, mdc;
1326 int fcx_in_css, fcx_in_gneq, fcx_in_features;
1327 struct dasd_eckd_private *private;
1328
1329 if (dasd_nofcx)
1330 return 0;
1331 /* is transport mode supported? */
1332 private = (struct dasd_eckd_private *) device->private;
1333 fcx_in_css = css_general_characteristics.fcx;
1334 fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1335 fcx_in_features = private->features.feature[40] & 0x80;
1336 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1337
1338 if (!tpm)
1339 return 0;
1340
1341 mdc = ccw_device_get_mdc(device->cdev, 0);
1342 if (mdc < 0) {
1343 dev_warn(&device->cdev->dev, "Detecting the maximum supported"
1344 " data size for zHPF requests failed\n");
1345 return 0;
1346 } else
1347 return mdc * FCX_MAX_DATA_FACTOR;
1348#else
1349 return 0;
1350#endif
1351}
1352
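ccw_device_get_mdc() reports the maximum data count per path; multiplied by FCX_MAX_DATA_FACTOR (65536 in dasd_eckd.h, value assumed here) it yields the zHPF request limit in bytes. A minimal sketch:

	static u32 fcx_bytes(int mdc)
	{
		/* e.g. mdc = 16 -> 16 * 65536 = 1 MiB per transport-mode request */
		return mdc < 0 ? 0 : mdc * FCX_MAX_DATA_FACTOR;
	}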
1108/* 1353/*
1109 * Check device characteristics. 1354 * Check device characteristics.
1110 * If the device is accessible using ECKD discipline, the device is enabled. 1355 * If the device is accessible using ECKD discipline, the device is enabled.
@@ -1190,7 +1435,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1190 goto out_err2; 1435 goto out_err2;
1191 } 1436 }
1192 /* 1437 /*
1193 * dasd_eckd_vaildate_server is done on the first device that 1438 * dasd_eckd_validate_server is done on the first device that
1194 * is found for an LCU. All later other devices have to wait 1439 * is found for an LCU. All later other devices have to wait
1195 * for it, so they will read the correct feature codes. 1440 * for it, so they will read the correct feature codes.
1196 */ 1441 */
@@ -1216,13 +1461,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1216 "Read device characteristic failed, rc=%d", rc); 1461 "Read device characteristic failed, rc=%d", rc);
1217 goto out_err3; 1462 goto out_err3;
1218 } 1463 }
1219 /* find the vaild cylinder size */ 1464 /* find the valid cylinder size */
1220 if (private->rdc_data.no_cyl == LV_COMPAT_CYL && 1465 if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1221 private->rdc_data.long_no_cyl) 1466 private->rdc_data.long_no_cyl)
1222 private->real_cyl = private->rdc_data.long_no_cyl; 1467 private->real_cyl = private->rdc_data.long_no_cyl;
1223 else 1468 else
1224 private->real_cyl = private->rdc_data.no_cyl; 1469 private->real_cyl = private->rdc_data.no_cyl;
1225 1470
1471 private->fcx_max_data = get_fcx_max_data(device);
1472
1226 readonly = dasd_device_is_ro(device); 1473 readonly = dasd_device_is_ro(device);
1227 if (readonly) 1474 if (readonly)
1228 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 1475 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -1364,10 +1611,8 @@ static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1364 1611
1365static int dasd_eckd_start_analysis(struct dasd_block *block) 1612static int dasd_eckd_start_analysis(struct dasd_block *block)
1366{ 1613{
1367 struct dasd_eckd_private *private;
1368 struct dasd_ccw_req *init_cqr; 1614 struct dasd_ccw_req *init_cqr;
1369 1615
1370 private = (struct dasd_eckd_private *) block->base->private;
1371 init_cqr = dasd_eckd_analysis_ccw(block->base); 1616 init_cqr = dasd_eckd_analysis_ccw(block->base);
1372 if (IS_ERR(init_cqr)) 1617 if (IS_ERR(init_cqr))
1373 return PTR_ERR(init_cqr); 1618 return PTR_ERR(init_cqr);
@@ -1404,6 +1649,13 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1404 dasd_sfree_request(init_cqr, device); 1649 dasd_sfree_request(init_cqr, device);
1405 } 1650 }
1406 1651
1652 if (device->features & DASD_FEATURE_USERAW) {
1653 block->bp_block = DASD_RAW_BLOCKSIZE;
1654 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
1655 block->s2b_shift = 3;
1656 goto raw;
1657 }
1658
1407 if (status == INIT_CQR_UNFORMATTED) { 1659 if (status == INIT_CQR_UNFORMATTED) {
1408 dev_warn(&device->cdev->dev, "The DASD is not formatted\n"); 1660 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1409 return -EMEDIUMTYPE; 1661 return -EMEDIUMTYPE;
@@ -1441,6 +1693,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1441 dev_warn(&device->cdev->dev, 1693 dev_warn(&device->cdev->dev,
1442 "Track 0 has no records following the VTOC\n"); 1694 "Track 0 has no records following the VTOC\n");
1443 } 1695 }
1696
1444 if (count_area != NULL && count_area->kl == 0) { 1697 if (count_area != NULL && count_area->kl == 0) {
 1445 /* we found nothing violating our disk layout */ 1698 /* we found nothing violating our disk layout */
1446 if (dasd_check_blocksize(count_area->dl) == 0) 1699 if (dasd_check_blocksize(count_area->dl) == 0)
@@ -1456,6 +1709,8 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1456 block->s2b_shift++; 1709 block->s2b_shift++;
1457 1710
1458 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 1711 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1712
1713raw:
1459 block->blocks = (private->real_cyl * 1714 block->blocks = (private->real_cyl *
1460 private->rdc_data.trk_per_cyl * 1715 private->rdc_data.trk_per_cyl *
1461 blk_per_trk); 1716 blk_per_trk);
@@ -1716,6 +1971,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1716 if (cqr->block && (cqr->startdev != cqr->block->base)) { 1971 if (cqr->block && (cqr->startdev != cqr->block->base)) {
1717 dasd_eckd_reset_ccw_to_base_io(cqr); 1972 dasd_eckd_reset_ccw_to_base_io(cqr);
1718 cqr->startdev = cqr->block->base; 1973 cqr->startdev = cqr->block->base;
1974 cqr->lpm = cqr->block->base->path_data.opm;
1719 } 1975 }
1720}; 1976};
1721 1977
@@ -1744,9 +2000,9 @@ dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1744 return dasd_default_erp_postaction; 2000 return dasd_default_erp_postaction;
1745} 2001}
1746 2002
1747 2003static void dasd_eckd_check_for_device_change(struct dasd_device *device,
1748static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, 2004 struct dasd_ccw_req *cqr,
1749 struct irb *irb) 2005 struct irb *irb)
1750{ 2006{
1751 char mask; 2007 char mask;
1752 char *sense = NULL; 2008 char *sense = NULL;
@@ -1770,51 +2026,42 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1770 /* schedule worker to reload device */ 2026 /* schedule worker to reload device */
1771 dasd_reload_device(device); 2027 dasd_reload_device(device);
1772 } 2028 }
1773
1774 dasd_generic_handle_state_change(device); 2029 dasd_generic_handle_state_change(device);
1775 return; 2030 return;
1776 } 2031 }
1777 2032
2033 sense = dasd_get_sense(irb);
2034 if (!sense)
2035 return;
2036
1778 /* summary unit check */ 2037 /* summary unit check */
1779 if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 2038 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
1780 (irb->ecw[7] == 0x0D)) { 2039 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
1781 dasd_alias_handle_summary_unit_check(device, irb); 2040 dasd_alias_handle_summary_unit_check(device, irb);
1782 return; 2041 return;
1783 } 2042 }
1784 2043
1785 sense = dasd_get_sense(irb);
1786 /* service information message SIM */ 2044 /* service information message SIM */
1787 if (sense && !(sense[27] & DASD_SENSE_BIT_0) && 2045 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
1788 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 2046 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1789 dasd_3990_erp_handle_sim(device, sense); 2047 dasd_3990_erp_handle_sim(device, sense);
1790 dasd_schedule_device_bh(device);
1791 return; 2048 return;
1792 } 2049 }
1793 2050
1794 if ((scsw_cc(&irb->scsw) == 1) && 2051 /* loss of device reservation is handled via base devices only
1795 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && 2052 * as alias devices may be used with several bases
1796 (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) && 2053 */
1797 (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) { 2054 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
1798 /* fake irb do nothing, they are handled elsewhere */ 2055 (sense[7] == 0x3F) &&
1799 dasd_schedule_device_bh(device); 2056 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
1800 return; 2057 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
1801 } 2058 if (device->features & DASD_FEATURE_FAILONSLCK)
1802 2059 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
1803 if (!sense) { 2060 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
1804 /* just report other unsolicited interrupts */ 2061 dev_err(&device->cdev->dev,
1805 DBF_DEV_EVENT(DBF_ERR, device, "%s", 2062 "The device reservation was lost\n");
1806 "unsolicited interrupt received");
1807 } else {
1808 DBF_DEV_EVENT(DBF_ERR, device, "%s",
1809 "unsolicited interrupt received "
1810 "(sense available)");
1811 device->discipline->dump_sense_dbf(device, irb, "unsolicited");
1812 } 2063 }
1813 2064}
1814 dasd_schedule_device_bh(device);
1815 return;
1816};
1817
1818 2065
1819static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 2066static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1820 struct dasd_device *startdev, 2067 struct dasd_device *startdev,
@@ -1995,7 +2242,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1995 cqr->memdev = startdev; 2242 cqr->memdev = startdev;
1996 cqr->block = block; 2243 cqr->block = block;
1997 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2244 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
1998 cqr->lpm = private->path_data.ppm; 2245 cqr->lpm = startdev->path_data.ppm;
1999 cqr->retries = 256; 2246 cqr->retries = 256;
2000 cqr->buildclk = get_clock(); 2247 cqr->buildclk = get_clock();
2001 cqr->status = DASD_CQR_FILLED; 2248 cqr->status = DASD_CQR_FILLED;
@@ -2015,7 +2262,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2015 unsigned int blk_per_trk, 2262 unsigned int blk_per_trk,
2016 unsigned int blksize) 2263 unsigned int blksize)
2017{ 2264{
2018 struct dasd_eckd_private *private;
2019 unsigned long *idaws; 2265 unsigned long *idaws;
2020 struct dasd_ccw_req *cqr; 2266 struct dasd_ccw_req *cqr;
2021 struct ccw1 *ccw; 2267 struct ccw1 *ccw;
@@ -2034,7 +2280,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2034 unsigned int recoffs; 2280 unsigned int recoffs;
2035 2281
2036 basedev = block->base; 2282 basedev = block->base;
2037 private = (struct dasd_eckd_private *) basedev->private;
2038 if (rq_data_dir(req) == READ) 2283 if (rq_data_dir(req) == READ)
2039 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 2284 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2040 else if (rq_data_dir(req) == WRITE) 2285 else if (rq_data_dir(req) == WRITE)
@@ -2172,7 +2417,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2172 cqr->memdev = startdev; 2417 cqr->memdev = startdev;
2173 cqr->block = block; 2418 cqr->block = block;
2174 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2419 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2175 cqr->lpm = private->path_data.ppm; 2420 cqr->lpm = startdev->path_data.ppm;
2176 cqr->retries = 256; 2421 cqr->retries = 256;
2177 cqr->buildclk = get_clock(); 2422 cqr->buildclk = get_clock();
2178 cqr->status = DASD_CQR_FILLED; 2423 cqr->status = DASD_CQR_FILLED;
@@ -2307,8 +2552,7 @@ static int prepare_itcw(struct itcw *itcw,
2307 2552
2308 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 2553 dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2309 &pfxdata, sizeof(pfxdata), total_data_size); 2554 &pfxdata, sizeof(pfxdata), total_data_size);
2310 2555 return IS_ERR(dcw) ? PTR_ERR(dcw) : 0;
2311 return rc;
2312} 2556}
2313 2557
2314static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 2558static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
@@ -2324,7 +2568,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2324 unsigned int blk_per_trk, 2568 unsigned int blk_per_trk,
2325 unsigned int blksize) 2569 unsigned int blksize)
2326{ 2570{
2327 struct dasd_eckd_private *private;
2328 struct dasd_ccw_req *cqr; 2571 struct dasd_ccw_req *cqr;
2329 struct req_iterator iter; 2572 struct req_iterator iter;
2330 struct bio_vec *bv; 2573 struct bio_vec *bv;
@@ -2337,9 +2580,14 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2337 struct tidaw *last_tidaw = NULL; 2580 struct tidaw *last_tidaw = NULL;
2338 int itcw_op; 2581 int itcw_op;
2339 size_t itcw_size; 2582 size_t itcw_size;
2583 u8 tidaw_flags;
2584 unsigned int seg_len, part_len, len_to_track_end;
2585 unsigned char new_track;
2586 sector_t recid, trkid;
2587 unsigned int offs;
2588 unsigned int count, count_to_trk_end;
2340 2589
2341 basedev = block->base; 2590 basedev = block->base;
2342 private = (struct dasd_eckd_private *) basedev->private;
2343 if (rq_data_dir(req) == READ) { 2591 if (rq_data_dir(req) == READ) {
2344 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 2592 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2345 itcw_op = ITCW_OP_READ; 2593 itcw_op = ITCW_OP_READ;
@@ -2352,12 +2600,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 2352 /* track-based I/O needs to address all memory via TIDAWs, 2600 /* track-based I/O needs to address all memory via TIDAWs,
2353 * not just for 64 bit addresses. This allows us to map 2601 * not just for 64 bit addresses. This allows us to map
2354 * each segment directly to one tidaw. 2602 * each segment directly to one tidaw.
2603 * In the case of write requests, additional tidaws may
2604 * be needed when a segment crosses a track boundary.
2355 */ 2605 */
2356 trkcount = last_trk - first_trk + 1; 2606 trkcount = last_trk - first_trk + 1;
2357 ctidaw = 0; 2607 ctidaw = 0;
2358 rq_for_each_segment(bv, req, iter) { 2608 rq_for_each_segment(bv, req, iter) {
2359 ++ctidaw; 2609 ++ctidaw;
2360 } 2610 }
2611 if (rq_data_dir(req) == WRITE)
2612 ctidaw += (last_trk - first_trk);
2361 2613
2362 /* Allocate the ccw request. */ 2614 /* Allocate the ccw request. */
2363 itcw_size = itcw_calc_size(0, ctidaw, 0); 2615 itcw_size = itcw_calc_size(0, ctidaw, 0);
@@ -2365,15 +2617,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2365 if (IS_ERR(cqr)) 2617 if (IS_ERR(cqr))
2366 return cqr; 2618 return cqr;
2367 2619
2368 cqr->cpmode = 1;
2369 cqr->startdev = startdev;
2370 cqr->memdev = startdev;
2371 cqr->block = block;
2372 cqr->expires = 100*HZ;
2373 cqr->buildclk = get_clock();
2374 cqr->status = DASD_CQR_FILLED;
2375 cqr->retries = 10;
2376
2377 /* transfer length factor: how many bytes to read from the last track */ 2620 /* transfer length factor: how many bytes to read from the last track */
2378 if (first_trk == last_trk) 2621 if (first_trk == last_trk)
2379 tlf = last_offs - first_offs + 1; 2622 tlf = last_offs - first_offs + 1;
@@ -2382,8 +2625,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2382 tlf *= blksize; 2625 tlf *= blksize;
2383 2626
2384 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 2627 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2628 if (IS_ERR(itcw)) {
2629 dasd_sfree_request(cqr, startdev);
2630 return ERR_PTR(-EINVAL);
2631 }
2385 cqr->cpaddr = itcw_get_tcw(itcw); 2632 cqr->cpaddr = itcw_get_tcw(itcw);
2386
2387 if (prepare_itcw(itcw, first_trk, last_trk, 2633 if (prepare_itcw(itcw, first_trk, last_trk,
2388 cmd, basedev, startdev, 2634 cmd, basedev, startdev,
2389 first_offs + 1, 2635 first_offs + 1,
@@ -2396,31 +2642,70 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2396 dasd_sfree_request(cqr, startdev); 2642 dasd_sfree_request(cqr, startdev);
2397 return ERR_PTR(-EAGAIN); 2643 return ERR_PTR(-EAGAIN);
2398 } 2644 }
2399 2645 len_to_track_end = 0;
2400 /* 2646 /*
2401 * A tidaw can address 4k of memory, but must not cross page boundaries 2647 * A tidaw can address 4k of memory, but must not cross page boundaries
2402 * We can let the block layer handle this by setting 2648 * We can let the block layer handle this by setting
2403 * blk_queue_segment_boundary to page boundaries and 2649 * blk_queue_segment_boundary to page boundaries and
2404 * blk_max_segment_size to page size when setting up the request queue. 2650 * blk_max_segment_size to page size when setting up the request queue.
2651 * For write requests, a TIDAW must not cross track boundaries, because
2652 * we have to set the CBC flag on the last tidaw for each track.
2405 */ 2653 */
2406 rq_for_each_segment(bv, req, iter) { 2654 if (rq_data_dir(req) == WRITE) {
2407 dst = page_address(bv->bv_page) + bv->bv_offset; 2655 new_track = 1;
2408 last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len); 2656 recid = first_rec;
2409 if (IS_ERR(last_tidaw)) 2657 rq_for_each_segment(bv, req, iter) {
2410 return (struct dasd_ccw_req *)last_tidaw; 2658 dst = page_address(bv->bv_page) + bv->bv_offset;
2659 seg_len = bv->bv_len;
2660 while (seg_len) {
2661 if (new_track) {
2662 trkid = recid;
2663 offs = sector_div(trkid, blk_per_trk);
2664 count_to_trk_end = blk_per_trk - offs;
2665 count = min((last_rec - recid + 1),
2666 (sector_t)count_to_trk_end);
2667 len_to_track_end = count * blksize;
2668 recid += count;
2669 new_track = 0;
2670 }
2671 part_len = min(seg_len, len_to_track_end);
2672 seg_len -= part_len;
2673 len_to_track_end -= part_len;
2674 /* We need to end the tidaw at track end */
2675 if (!len_to_track_end) {
2676 new_track = 1;
2677 tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
2678 } else
2679 tidaw_flags = 0;
2680 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
2681 dst, part_len);
2682 if (IS_ERR(last_tidaw))
2683 return ERR_PTR(-EINVAL);
2684 dst += part_len;
2685 }
2686 }
2687 } else {
2688 rq_for_each_segment(bv, req, iter) {
2689 dst = page_address(bv->bv_page) + bv->bv_offset;
2690 last_tidaw = itcw_add_tidaw(itcw, 0x00,
2691 dst, bv->bv_len);
2692 if (IS_ERR(last_tidaw))
2693 return ERR_PTR(-EINVAL);
2694 }
2411 } 2695 }
2412 2696 last_tidaw->flags |= TIDAW_FLAGS_LAST;
2413 last_tidaw->flags |= 0x80; 2697 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
2414 itcw_finalize(itcw); 2698 itcw_finalize(itcw);
2415 2699
2416 if (blk_noretry_request(req) || 2700 if (blk_noretry_request(req) ||
2417 block->base->features & DASD_FEATURE_FAILFAST) 2701 block->base->features & DASD_FEATURE_FAILFAST)
2418 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2702 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2703 cqr->cpmode = 1;
2419 cqr->startdev = startdev; 2704 cqr->startdev = startdev;
2420 cqr->memdev = startdev; 2705 cqr->memdev = startdev;
2421 cqr->block = block; 2706 cqr->block = block;
2422 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2707 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2423 cqr->lpm = private->path_data.ppm; 2708 cqr->lpm = startdev->path_data.ppm;
2424 cqr->retries = 256; 2709 cqr->retries = 256;
2425 cqr->buildclk = get_clock(); 2710 cqr->buildclk = get_clock();
2426 cqr->status = DASD_CQR_FILLED; 2711 cqr->status = DASD_CQR_FILLED;
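For writes, every TIDAW must end exactly at a track boundary so the CBC flag can be inserted, which is why the loop above carves each bio segment against len_to_track_end. The split rule in isolation (editor's sketch, not the patch's code):

	static unsigned int split_at_track_end(unsigned int seg_len,
					       unsigned int len_to_track_end,
					       u8 *flags)
	{
		unsigned int part_len =
			seg_len < len_to_track_end ? seg_len : len_to_track_end;

		/* e.g. blk_per_trk = 12, blksize = 4k: an 8k segment starting
		 * at the last block of a track yields 4k flagged with
		 * TIDAW_FLAGS_INSERT_CBC plus 4k on the next track */
		*flags = (part_len == len_to_track_end) ?
			TIDAW_FLAGS_INSERT_CBC : 0;
		return part_len;
	}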
@@ -2431,11 +2716,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2431 struct dasd_block *block, 2716 struct dasd_block *block,
2432 struct request *req) 2717 struct request *req)
2433{ 2718{
2434 int tpm, cmdrtd, cmdwtd; 2719 int cmdrtd, cmdwtd;
2435 int use_prefix; 2720 int use_prefix;
2436#if defined(CONFIG_64BIT) 2721 int fcx_multitrack;
2437 int fcx_in_css, fcx_in_gneq, fcx_in_features;
2438#endif
2439 struct dasd_eckd_private *private; 2722 struct dasd_eckd_private *private;
2440 struct dasd_device *basedev; 2723 struct dasd_device *basedev;
2441 sector_t first_rec, last_rec; 2724 sector_t first_rec, last_rec;
@@ -2443,6 +2726,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2443 unsigned int first_offs, last_offs; 2726 unsigned int first_offs, last_offs;
2444 unsigned int blk_per_trk, blksize; 2727 unsigned int blk_per_trk, blksize;
2445 int cdlspecial; 2728 int cdlspecial;
2729 unsigned int data_size;
2446 struct dasd_ccw_req *cqr; 2730 struct dasd_ccw_req *cqr;
2447 2731
2448 basedev = block->base; 2732 basedev = block->base;
@@ -2461,15 +2745,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2461 last_offs = sector_div(last_trk, blk_per_trk); 2745 last_offs = sector_div(last_trk, blk_per_trk);
2462 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2746 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2463 2747
2464 /* is transport mode supported? */ 2748 fcx_multitrack = private->features.feature[40] & 0x20;
2465#if defined(CONFIG_64BIT) 2749 data_size = blk_rq_bytes(req);
2466 fcx_in_css = css_general_characteristics.fcx; 2750 /* tpm write request add CBC data on each track boundary */
2467 fcx_in_gneq = private->gneq->reserved2[7] & 0x04; 2751 if (rq_data_dir(req) == WRITE)
2468 fcx_in_features = private->features.feature[40] & 0x80; 2752 data_size += (last_trk - first_trk) * 4;
2469 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2470#else
2471 tpm = 0;
2472#endif
2473 2753
2474 /* is read track data and write track data in command mode supported? */ 2754 /* is read track data and write track data in command mode supported? */
2475 cmdrtd = private->features.feature[9] & 0x20; 2755 cmdrtd = private->features.feature[9] & 0x20;
@@ -2479,13 +2759,15 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2479 cqr = NULL; 2759 cqr = NULL;
2480 if (cdlspecial || dasd_page_cache) { 2760 if (cdlspecial || dasd_page_cache) {
2481 /* do nothing, just fall through to the cmd mode single case */ 2761 /* do nothing, just fall through to the cmd mode single case */
2482 } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) { 2762 } else if ((data_size <= private->fcx_max_data)
2763 && (fcx_multitrack || (first_trk == last_trk))) {
2483 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 2764 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2484 first_rec, last_rec, 2765 first_rec, last_rec,
2485 first_trk, last_trk, 2766 first_trk, last_trk,
2486 first_offs, last_offs, 2767 first_offs, last_offs,
2487 blk_per_trk, blksize); 2768 blk_per_trk, blksize);
2488 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2769 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
2770 (PTR_ERR(cqr) != -ENOMEM))
2489 cqr = NULL; 2771 cqr = NULL;
2490 } else if (use_prefix && 2772 } else if (use_prefix &&
2491 (((rq_data_dir(req) == READ) && cmdrtd) || 2773 (((rq_data_dir(req) == READ) && cmdrtd) ||
@@ -2495,7 +2777,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2495 first_trk, last_trk, 2777 first_trk, last_trk,
2496 first_offs, last_offs, 2778 first_offs, last_offs,
2497 blk_per_trk, blksize); 2779 blk_per_trk, blksize);
2498 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2780 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
2781 (PTR_ERR(cqr) != -ENOMEM))
2499 cqr = NULL; 2782 cqr = NULL;
2500 } 2783 }
2501 if (!cqr) 2784 if (!cqr)
@@ -2507,6 +2790,133 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2507 return cqr; 2790 return cqr;
2508} 2791}
2509 2792
2793static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
2794 struct dasd_block *block,
2795 struct request *req)
2796{
2797 unsigned long *idaws;
2798 struct dasd_device *basedev;
2799 struct dasd_ccw_req *cqr;
2800 struct ccw1 *ccw;
2801 struct req_iterator iter;
2802 struct bio_vec *bv;
2803 char *dst;
2804 unsigned char cmd;
2805 unsigned int trkcount;
2806 unsigned int seg_len, len_to_track_end;
2807 unsigned int first_offs;
2808 unsigned int cidaw, cplength, datasize;
2809 sector_t first_trk, last_trk;
2810 unsigned int pfx_datasize;
2811
2812 /*
 2813 * raw track access needs to be a multiple of 64k and on a 64k boundary
2814 */
2815 if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) {
2816 cqr = ERR_PTR(-EINVAL);
2817 goto out;
2818 }
2819 if (((blk_rq_pos(req) + blk_rq_sectors(req)) %
2820 DASD_RAW_SECTORS_PER_TRACK) != 0) {
2821 cqr = ERR_PTR(-EINVAL);
2822 goto out;
2823 }
2824
2825 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
2826 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
2827 DASD_RAW_SECTORS_PER_TRACK;
2828 trkcount = last_trk - first_trk + 1;
2829 first_offs = 0;
2830 basedev = block->base;
2831
2832 if (rq_data_dir(req) == READ)
2833 cmd = DASD_ECKD_CCW_READ_TRACK;
2834 else if (rq_data_dir(req) == WRITE)
2835 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
2836 else {
2837 cqr = ERR_PTR(-EINVAL);
2838 goto out;
2839 }
2840
2841 /*
2842 * Raw track based I/O needs IDAWs for each page,
2843 * and not just for 64 bit addresses.
2844 */
2845 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
2846
2847 /* 1x prefix + one read/write ccw per track */
2848 cplength = 1 + trkcount;
2849
2850 /*
 2851 * struct PFX_eckd_data has up to 2 bytes of extended parameter data;
 2852 * this is needed for write full track and has to be accounted for
 2853 * separately
 2854 * add 8 instead of 2 to keep the 8 byte boundary
2855 */
2856 pfx_datasize = sizeof(struct PFX_eckd_data) + 8;
2857
2858 datasize = pfx_datasize + cidaw * sizeof(unsigned long long);
2859
2860 /* Allocate the ccw request. */
2861 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
2862 datasize, startdev);
2863 if (IS_ERR(cqr))
2864 goto out;
2865 ccw = cqr->cpaddr;
2866
2867 if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd,
2868 basedev, startdev, 1 /* format */, first_offs + 1,
2869 trkcount, 0, 0) == -EAGAIN) {
2870 /* Clock not in sync and XRC is enabled.
2871 * Try again later.
2872 */
2873 dasd_sfree_request(cqr, startdev);
2874 cqr = ERR_PTR(-EAGAIN);
2875 goto out;
2876 }
2877
2878 idaws = (unsigned long *)(cqr->data + pfx_datasize);
2879
2880 len_to_track_end = 0;
2881
2882 rq_for_each_segment(bv, req, iter) {
2883 dst = page_address(bv->bv_page) + bv->bv_offset;
2884 seg_len = bv->bv_len;
2885 if (!len_to_track_end) {
2886 ccw[-1].flags |= CCW_FLAG_CC;
2887 ccw->cmd_code = cmd;
2888 /* maximum 3390 track size */
2889 ccw->count = 57326;
 2890 /* 64k maps to one track */
2891 len_to_track_end = 65536;
2892 ccw->cda = (__u32)(addr_t)idaws;
2893 ccw->flags |= CCW_FLAG_IDA;
2894 ccw->flags |= CCW_FLAG_SLI;
2895 ccw++;
2896 }
2897 len_to_track_end -= seg_len;
2898 idaws = idal_create_words(idaws, dst, seg_len);
2899 }
2900
2901 if (blk_noretry_request(req) ||
2902 block->base->features & DASD_FEATURE_FAILFAST)
2903 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2904 cqr->startdev = startdev;
2905 cqr->memdev = startdev;
2906 cqr->block = block;
2907 cqr->expires = startdev->default_expires * HZ;
2908 cqr->lpm = startdev->path_data.ppm;
2909 cqr->retries = 256;
2910 cqr->buildclk = get_clock();
2911 cqr->status = DASD_CQR_FILLED;
2912
2913 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2914 cqr = NULL;
2915out:
2916 return cqr;
2917}
2918
2919
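With DASD_RAW_SECTORS_PER_TRACK = 128, the alignment checks at the top of dasd_raw_build_cp() accept only whole-track requests. A worked sketch of the rule (editor's illustration):

	/* pos = 256, sectors = 384 -> tracks 2..4, accepted
	 * pos = 200, sectors = 128 -> -EINVAL, not on a track boundary */
	static int raw_request_tracks(sector_t pos, unsigned int sectors,
				      sector_t *first, sector_t *last)
	{
		if (pos % DASD_RAW_SECTORS_PER_TRACK ||
		    (pos + sectors) % DASD_RAW_SECTORS_PER_TRACK)
			return -EINVAL;
		*first = pos / DASD_RAW_SECTORS_PER_TRACK;
		*last = (pos + sectors - 1) / DASD_RAW_SECTORS_PER_TRACK;
		return 0;
	}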
2510static int 2920static int
2511dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 2921dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2512{ 2922{
@@ -2611,7 +3021,10 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2611 3021
2612 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 3022 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2613 private->count++; 3023 private->count++;
2614 cqr = dasd_eckd_build_cp(startdev, block, req); 3024 if ((base->features & DASD_FEATURE_USERAW))
3025 cqr = dasd_raw_build_cp(startdev, block, req);
3026 else
3027 cqr = dasd_eckd_build_cp(startdev, block, req);
2615 if (IS_ERR(cqr)) 3028 if (IS_ERR(cqr))
2616 private->count--; 3029 private->count--;
2617 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 3030 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
@@ -2699,6 +3112,8 @@ dasd_eckd_release(struct dasd_device *device)
2699 cqr->status = DASD_CQR_FILLED; 3112 cqr->status = DASD_CQR_FILLED;
2700 3113
2701 rc = dasd_sleep_on_immediatly(cqr); 3114 rc = dasd_sleep_on_immediatly(cqr);
3115 if (!rc)
3116 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2702 3117
2703 if (useglobal) 3118 if (useglobal)
2704 mutex_unlock(&dasd_reserve_mutex); 3119 mutex_unlock(&dasd_reserve_mutex);
@@ -2752,6 +3167,8 @@ dasd_eckd_reserve(struct dasd_device *device)
2752 cqr->status = DASD_CQR_FILLED; 3167 cqr->status = DASD_CQR_FILLED;
2753 3168
2754 rc = dasd_sleep_on_immediatly(cqr); 3169 rc = dasd_sleep_on_immediatly(cqr);
3170 if (!rc)
3171 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2755 3172
2756 if (useglobal) 3173 if (useglobal)
2757 mutex_unlock(&dasd_reserve_mutex); 3174 mutex_unlock(&dasd_reserve_mutex);
@@ -2804,6 +3221,76 @@ dasd_eckd_steal_lock(struct dasd_device *device)
2804 cqr->status = DASD_CQR_FILLED; 3221 cqr->status = DASD_CQR_FILLED;
2805 3222
2806 rc = dasd_sleep_on_immediatly(cqr); 3223 rc = dasd_sleep_on_immediatly(cqr);
3224 if (!rc)
3225 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3226
3227 if (useglobal)
3228 mutex_unlock(&dasd_reserve_mutex);
3229 else
3230 dasd_sfree_request(cqr, cqr->memdev);
3231 return rc;
3232}
3233
3234/*
3235 * SNID - Sense Path Group ID
3236 * This ioctl may be used in situations where I/O is stalled due to
3237 * a reserve, so if the normal dasd_smalloc_request fails, we use the
3238 * preallocated dasd_reserve_req.
3239 */
3240static int dasd_eckd_snid(struct dasd_device *device,
3241 void __user *argp)
3242{
3243 struct dasd_ccw_req *cqr;
3244 int rc;
3245 struct ccw1 *ccw;
3246 int useglobal;
3247 struct dasd_snid_ioctl_data usrparm;
3248
3249 if (!capable(CAP_SYS_ADMIN))
3250 return -EACCES;
3251
3252 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
3253 return -EFAULT;
3254
3255 useglobal = 0;
3256 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
3257 sizeof(struct dasd_snid_data), device);
3258 if (IS_ERR(cqr)) {
3259 mutex_lock(&dasd_reserve_mutex);
3260 useglobal = 1;
3261 cqr = &dasd_reserve_req->cqr;
3262 memset(cqr, 0, sizeof(*cqr));
3263 memset(&dasd_reserve_req->ccw, 0,
3264 sizeof(dasd_reserve_req->ccw));
3265 cqr->cpaddr = &dasd_reserve_req->ccw;
3266 cqr->data = &dasd_reserve_req->data;
3267 cqr->magic = DASD_ECKD_MAGIC;
3268 }
3269 ccw = cqr->cpaddr;
3270 ccw->cmd_code = DASD_ECKD_CCW_SNID;
3271 ccw->flags |= CCW_FLAG_SLI;
3272 ccw->count = 12;
3273 ccw->cda = (__u32)(addr_t) cqr->data;
3274 cqr->startdev = device;
3275 cqr->memdev = device;
3276 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
3277 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3278 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
3279 cqr->retries = 5;
3280 cqr->expires = 10 * HZ;
3281 cqr->buildclk = get_clock();
3282 cqr->status = DASD_CQR_FILLED;
3283 cqr->lpm = usrparm.path_mask;
3284
3285 rc = dasd_sleep_on_immediatly(cqr);
3286 /* verify that I/O processing didn't modify the path mask */
3287 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
3288 rc = -EIO;
3289 if (!rc) {
3290 usrparm.data = *((struct dasd_snid_data *)cqr->data);
3291 if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
3292 rc = -EFAULT;
3293 }
2807 3294
2808 if (useglobal) 3295 if (useglobal)
2809 mutex_unlock(&dasd_reserve_mutex); 3296 mutex_unlock(&dasd_reserve_mutex);
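A hypothetical userspace caller of the new SNID ioctl; path_mask and data are the struct fields the kernel code above actually uses, while the header location is an assumption:

	#include <sys/ioctl.h>
	#include <asm/dasd.h>	/* struct dasd_snid_ioctl_data, BIODASDSNID */

	static int sense_path_group_id(int fd, unsigned char path_mask)
	{
		struct dasd_snid_ioctl_data snid = { .path_mask = path_mask };

		if (ioctl(fd, BIODASDSNID, &snid) < 0)
			return -1;
		/* snid.data now holds the 12 bytes read by the SNID CCW */
		return 0;
	}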
@@ -3047,6 +3534,8 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
3047 return dasd_eckd_reserve(device); 3534 return dasd_eckd_reserve(device);
3048 case BIODASDSLCK: 3535 case BIODASDSLCK:
3049 return dasd_eckd_steal_lock(device); 3536 return dasd_eckd_steal_lock(device);
3537 case BIODASDSNID:
3538 return dasd_eckd_snid(device, argp);
3050 case BIODASDSYMMIO: 3539 case BIODASDSYMMIO:
3051 return dasd_symm_io(device, argp); 3540 return dasd_symm_io(device, argp);
3052 default: 3541 default:
@@ -3093,19 +3582,19 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
3093 char *reason) 3582 char *reason)
3094{ 3583{
3095 u64 *sense; 3584 u64 *sense;
3585 u64 *stat;
3096 3586
3097 sense = (u64 *) dasd_get_sense(irb); 3587 sense = (u64 *) dasd_get_sense(irb);
3588 stat = (u64 *) &irb->scsw;
3098 if (sense) { 3589 if (sense) {
3099 DBF_DEV_EVENT(DBF_EMERG, device, 3590 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
3100 "%s: %s %02x%02x%02x %016llx %016llx %016llx " 3591 "%016llx %016llx %016llx %016llx",
3101 "%016llx", reason, 3592 reason, *stat, *((u32 *) (stat + 1)),
3102 scsw_is_tm(&irb->scsw) ? "t" : "c", 3593 sense[0], sense[1], sense[2], sense[3]);
3103 scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
3104 scsw_dstat(&irb->scsw), sense[0], sense[1],
3105 sense[2], sense[3]);
3106 } else { 3594 } else {
3107 DBF_DEV_EVENT(DBF_EMERG, device, "%s", 3595 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
3108 "SORRY - NO VALID SENSE AVAILABLE\n"); 3596 reason, *stat, *((u32 *) (stat + 1)),
3597 "NO VALID SENSE");
3109 } 3598 }
3110} 3599}
3111 3600
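
The reworked dump_sense_dbf above stops decoding individual SCSW fields and instead logs the first 12 bytes of the status word raw, as one u64 plus one u32 (stat and stat + 1). A hedged standalone illustration of that kind of raw-word dump; struct scsw_like is a stand-in for the real struct scsw, and the output is in host byte order just like the kernel code.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct scsw_like { uint8_t bytes[12]; };  /* stand-in for struct scsw */

    /* Log the status word as one 64-bit and one 32-bit hex value. */
    static void dump_stat(const struct scsw_like *s)
    {
        uint64_t head;
        uint32_t tail;

        memcpy(&head, s->bytes, sizeof(head));      /* bytes 0..7 */
        memcpy(&tail, s->bytes + 8, sizeof(tail));  /* bytes 8..11 */
        printf("%016llx %08x\n", (unsigned long long)head, (unsigned)tail);
    }

    int main(void)
    {
        struct scsw_like s = { { 0xde, 0xad } };
        dump_stat(&s);
        return 0;
    }

The memcpy() here avoids the alignment and aliasing questions a raw pointer cast would raise in portable C; the kernel code can cast directly because it controls the layout.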
@@ -3131,9 +3620,12 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3131 " I/O status report for device %s:\n", 3620 " I/O status report for device %s:\n",
3132 dev_name(&device->cdev->dev)); 3621 dev_name(&device->cdev->dev));
3133 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3622 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3134 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", 3623 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
3135 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3624 "CS:%02X RC:%d\n",
3136 scsw_cc(&irb->scsw), req ? req->intrc : 0); 3625 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
3626 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
3627 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
3628 req ? req->intrc : 0);
3137 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3629 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3138 " device %s: Failing CCW: %p\n", 3630 " device %s: Failing CCW: %p\n",
3139 dev_name(&device->cdev->dev), 3631 dev_name(&device->cdev->dev),
@@ -3218,10 +3710,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3218{ 3710{
3219 char *page; 3711 char *page;
3220 int len, sl, sct, residual; 3712 int len, sl, sct, residual;
3221
3222 struct tsb *tsb; 3713 struct tsb *tsb;
3223 u8 *sense; 3714 u8 *sense, *rcq;
3224
3225 3715
3226 page = (char *) get_zeroed_page(GFP_ATOMIC); 3716 page = (char *) get_zeroed_page(GFP_ATOMIC);
3227 if (page == NULL) { 3717 if (page == NULL) {
@@ -3234,11 +3724,13 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3234 " I/O status report for device %s:\n", 3724 " I/O status report for device %s:\n",
3235 dev_name(&device->cdev->dev)); 3725 dev_name(&device->cdev->dev));
3236 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3726 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3237 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d " 3727 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
3238 "fcxs: 0x%02X schxs: 0x%02X\n", req, 3728 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
3239 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3729 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
3240 scsw_cc(&irb->scsw), req->intrc, 3730 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
3241 irb->scsw.tm.fcxs, irb->scsw.tm.schxs); 3731 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
3732 irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
3733 req ? req->intrc : 0);
3242 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3734 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3243 " device %s: Failing TCW: %p\n", 3735 " device %s: Failing TCW: %p\n",
3244 dev_name(&device->cdev->dev), 3736 dev_name(&device->cdev->dev),
@@ -3246,7 +3738,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3246 3738
3247 tsb = NULL; 3739 tsb = NULL;
3248 sense = NULL; 3740 sense = NULL;
3249 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01)) 3741 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
3250 tsb = tcw_get_tsb( 3742 tsb = tcw_get_tsb(
3251 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 3743 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3252 3744
@@ -3285,12 +3777,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3285 case 2: /* ts_ddpc */ 3777 case 2: /* ts_ddpc */
3286 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3778 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3287 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 3779 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3288 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3780 for (sl = 0; sl < 2; sl++) {
3289 " tsb->tsa.ddpc.rcq: "); 3781 len += sprintf(page + len,
3290 for (sl = 0; sl < 16; sl++) { 3782 KERN_ERR PRINTK_HEADER
3783 " tsb->tsa.ddpc.rcq %2d-%2d: ",
3784 (8 * sl), ((8 * sl) + 7));
3785 rcq = tsb->tsa.ddpc.rcq;
3291 for (sct = 0; sct < 8; sct++) { 3786 for (sct = 0; sct < 8; sct++) {
3292 len += sprintf(page + len, " %02x", 3787 len += sprintf(page + len, " %02x",
3293 tsb->tsa.ddpc.rcq[sl]); 3788 rcq[8 * sl + sct]);
3294 } 3789 }
3295 len += sprintf(page + len, "\n"); 3790 len += sprintf(page + len, "\n");
3296 } 3791 }
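
This hunk also fixes a real bug: the old loop printed tsb->tsa.ddpc.rcq[sl] eight times per row instead of walking all 16 bytes of the record-context queue. The new code emits two labeled rows of eight bytes each, indexing rcq[8 * sl + sct]. A standalone C equivalent of the corrected loop, with illustrative function and buffer names:

    #include <stdio.h>

    /* Print a 16-byte buffer as two labeled rows of eight, mirroring
     * the fixed rcq dump above. */
    static void dump_rcq(const unsigned char rcq[16])
    {
        int sl, sct;

        for (sl = 0; sl < 2; sl++) {
            printf("rcq %2d-%2d:", 8 * sl, 8 * sl + 7);
            for (sct = 0; sct < 8; sct++)
                printf(" %02x", rcq[8 * sl + sct]);
            printf("\n");
        }
    }

    int main(void)
    {
        unsigned char rcq[16];
        int i;

        for (i = 0; i < 16; i++)
            rcq[i] = (unsigned char)i;
        dump_rcq(rcq);
        return 0;
    }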
@@ -3344,7 +3839,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3344static void dasd_eckd_dump_sense(struct dasd_device *device, 3839static void dasd_eckd_dump_sense(struct dasd_device *device,
3345 struct dasd_ccw_req *req, struct irb *irb) 3840 struct dasd_ccw_req *req, struct irb *irb)
3346{ 3841{
3347 if (req && scsw_is_tm(&req->irb.scsw)) 3842 if (scsw_is_tm(&irb->scsw))
3348 dasd_eckd_dump_sense_tcw(device, req, irb); 3843 dasd_eckd_dump_sense_tcw(device, req, irb);
3349 else 3844 else
3350 dasd_eckd_dump_sense_ccw(device, req, irb); 3845 dasd_eckd_dump_sense_ccw(device, req, irb);
@@ -3479,14 +3974,17 @@ out_err:
3479} 3974}
3480 3975
3481static struct ccw_driver dasd_eckd_driver = { 3976static struct ccw_driver dasd_eckd_driver = {
3482 .name = "dasd-eckd", 3977 .driver = {
3483 .owner = THIS_MODULE, 3978 .name = "dasd-eckd",
3979 .owner = THIS_MODULE,
3980 },
3484 .ids = dasd_eckd_ids, 3981 .ids = dasd_eckd_ids,
3485 .probe = dasd_eckd_probe, 3982 .probe = dasd_eckd_probe,
3486 .remove = dasd_generic_remove, 3983 .remove = dasd_generic_remove,
3487 .set_offline = dasd_generic_set_offline, 3984 .set_offline = dasd_generic_set_offline,
3488 .set_online = dasd_eckd_set_online, 3985 .set_online = dasd_eckd_set_online,
3489 .notify = dasd_generic_notify, 3986 .notify = dasd_generic_notify,
3987 .path_event = dasd_generic_path_event,
3490 .freeze = dasd_generic_pm_freeze, 3988 .freeze = dasd_generic_pm_freeze,
3491 .thaw = dasd_generic_restore_device, 3989 .thaw = dasd_generic_restore_device,
3492 .restore = dasd_generic_restore_device, 3990 .restore = dasd_generic_restore_device,
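
The dasd_eckd_driver change tracks a tree-wide conversion: name and owner now live in an embedded struct device_driver rather than as direct ccw_driver fields. Only the nested designated initializer is new; a reduced sketch with deliberately simplified types:

    struct device_driver { const char *name; void *owner; };

    struct ccw_driver_like {
        struct device_driver driver;  /* embedded core-driver fields */
        int (*probe)(void *);
    };

    static int my_probe(void *dev) { return 0; }

    static struct ccw_driver_like example_driver = {
        .driver = {
            .name  = "dasd-eckd",
            .owner = 0,  /* THIS_MODULE in real kernel code */
        },
        .probe = my_probe,
    };

Embedding the generic fields lets the driver core treat every bus-specific driver uniformly through its struct device_driver member.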
@@ -3510,10 +4008,11 @@ static struct dasd_discipline dasd_eckd_discipline = {
3510 .owner = THIS_MODULE, 4008 .owner = THIS_MODULE,
3511 .name = "ECKD", 4009 .name = "ECKD",
3512 .ebcname = "ECKD", 4010 .ebcname = "ECKD",
3513 .max_blocks = 240, 4011 .max_blocks = 190,
3514 .check_device = dasd_eckd_check_characteristics, 4012 .check_device = dasd_eckd_check_characteristics,
3515 .uncheck_device = dasd_eckd_uncheck_device, 4013 .uncheck_device = dasd_eckd_uncheck_device,
3516 .do_analysis = dasd_eckd_do_analysis, 4014 .do_analysis = dasd_eckd_do_analysis,
4015 .verify_path = dasd_eckd_verify_path,
3517 .ready_to_online = dasd_eckd_ready_to_online, 4016 .ready_to_online = dasd_eckd_ready_to_online,
3518 .online_to_ready = dasd_eckd_online_to_ready, 4017 .online_to_ready = dasd_eckd_online_to_ready,
3519 .fill_geometry = dasd_eckd_fill_geometry, 4018 .fill_geometry = dasd_eckd_fill_geometry,
@@ -3523,7 +4022,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
3523 .format_device = dasd_eckd_format_device, 4022 .format_device = dasd_eckd_format_device,
3524 .erp_action = dasd_eckd_erp_action, 4023 .erp_action = dasd_eckd_erp_action,
3525 .erp_postaction = dasd_eckd_erp_postaction, 4024 .erp_postaction = dasd_eckd_erp_postaction,
3526 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt, 4025 .check_for_device_change = dasd_eckd_check_for_device_change,
3527 .build_cp = dasd_eckd_build_alias_cp, 4026 .build_cp = dasd_eckd_build_alias_cp,
3528 .free_cp = dasd_eckd_free_alias_cp, 4027 .free_cp = dasd_eckd_free_alias_cp,
3529 .dump_sense = dasd_eckd_dump_sense, 4028 .dump_sense = dasd_eckd_dump_sense,
@@ -3546,11 +4045,19 @@ dasd_eckd_init(void)
3546 GFP_KERNEL | GFP_DMA); 4045 GFP_KERNEL | GFP_DMA);
3547 if (!dasd_reserve_req) 4046 if (!dasd_reserve_req)
3548 return -ENOMEM; 4047 return -ENOMEM;
4048 path_verification_worker = kmalloc(sizeof(*path_verification_worker),
4049 GFP_KERNEL | GFP_DMA);
4050 if (!path_verification_worker) {
4051 kfree(dasd_reserve_req);
4052 return -ENOMEM;
4053 }
3549 ret = ccw_driver_register(&dasd_eckd_driver); 4054 ret = ccw_driver_register(&dasd_eckd_driver);
3550 if (!ret) 4055 if (!ret)
3551 wait_for_device_probe(); 4056 wait_for_device_probe();
3552 else 4057 else {
4058 kfree(path_verification_worker);
3553 kfree(dasd_reserve_req); 4059 kfree(dasd_reserve_req);
4060 }
3554 return ret; 4061 return ret;
3555} 4062}
3556 4063
@@ -3558,6 +4065,7 @@ static void __exit
3558dasd_eckd_cleanup(void) 4065dasd_eckd_cleanup(void)
3559{ 4066{
3560 ccw_driver_unregister(&dasd_eckd_driver); 4067 ccw_driver_unregister(&dasd_eckd_driver);
4068 kfree(path_verification_worker);
3561 kfree(dasd_reserve_req); 4069 kfree(dasd_reserve_req);
3562} 4070}
3563 4071
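
dasd_eckd_init() now allocates two static objects and must unwind in reverse order on every failure path, while dasd_eckd_cleanup() frees both unconditionally. The shape of that init/unwind pairing, as a hedged standalone sketch (buffer names invented):

    #include <stdlib.h>

    static void *reserve_buf;
    static void *verify_buf;

    /* Allocate both buffers; on any failure, free what was already
     * obtained so init leaves no partial state behind. */
    int example_init(void)
    {
        reserve_buf = malloc(256);
        if (!reserve_buf)
            return -1;
        verify_buf = malloc(256);
        if (!verify_buf) {
            free(reserve_buf);
            return -1;
        }
        return 0;
    }

    void example_cleanup(void)
    {
        free(verify_buf);
        free(reserve_buf);
    }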
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 0eb49655a6cd..4a688a873a77 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -27,6 +27,7 @@
27#define DASD_ECKD_CCW_WRITE_CKD 0x1d 27#define DASD_ECKD_CCW_WRITE_CKD 0x1d
28#define DASD_ECKD_CCW_READ_CKD 0x1e 28#define DASD_ECKD_CCW_READ_CKD 0x1e
29#define DASD_ECKD_CCW_PSF 0x27 29#define DASD_ECKD_CCW_PSF 0x27
30#define DASD_ECKD_CCW_SNID 0x34
30#define DASD_ECKD_CCW_RSSD 0x3e 31#define DASD_ECKD_CCW_RSSD 0x3e
31#define DASD_ECKD_CCW_LOCATE_RECORD 0x47 32#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
32#define DASD_ECKD_CCW_SNSS 0x54 33#define DASD_ECKD_CCW_SNSS 0x54
@@ -36,14 +37,17 @@
36#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d 37#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
37#define DASD_ECKD_CCW_READ_KD_MT 0x8e 38#define DASD_ECKD_CCW_READ_KD_MT 0x8e
38#define DASD_ECKD_CCW_RELEASE 0x94 39#define DASD_ECKD_CCW_RELEASE 0x94
40#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e 41#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d 42#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
41#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5 43#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
42#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6 44#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
43#define DASD_ECKD_CCW_RESERVE 0xB4 45#define DASD_ECKD_CCW_RESERVE 0xB4
46#define DASD_ECKD_CCW_READ_TRACK 0xDE
44#define DASD_ECKD_CCW_PFX 0xE7 47#define DASD_ECKD_CCW_PFX 0xE7
45#define DASD_ECKD_CCW_PFX_READ 0xEA 48#define DASD_ECKD_CCW_PFX_READ 0xEA
46#define DASD_ECKD_CCW_RSCK 0xF9 49#define DASD_ECKD_CCW_RSCK 0xF9
50#define DASD_ECKD_CCW_RCD 0xFA
47 51
48/* 52/*
49 * Perform Subsystem Function / Sub-Orders 53 * Perform Subsystem Function / Sub-Orders
@@ -56,6 +60,11 @@
56 */ 60 */
57#define LV_COMPAT_CYL 0xFFFE 61#define LV_COMPAT_CYL 0xFFFE
58 62
63
64#define FCX_MAX_DATA_FACTOR 65536
65#define DASD_ECKD_RCD_DATA_SIZE 256
66
67
59/***************************************************************************** 68/*****************************************************************************
60 * SECTION: Type Definitions 69 * SECTION: Type Definitions
61 ****************************************************************************/ 70 ****************************************************************************/
@@ -330,12 +339,6 @@ struct dasd_gneq {
330 __u8 reserved2[22]; 339 __u8 reserved2[22];
331} __attribute__ ((packed)); 340} __attribute__ ((packed));
332 341
333struct dasd_eckd_path {
334 __u8 opm;
335 __u8 ppm;
336 __u8 npm;
337};
338
339struct dasd_rssd_features { 342struct dasd_rssd_features {
340 char feature[256]; 343 char feature[256];
341} __attribute__((packed)); 344} __attribute__((packed));
@@ -441,7 +444,6 @@ struct dasd_eckd_private {
441 struct vd_sneq *vdsneq; 444 struct vd_sneq *vdsneq;
442 struct dasd_gneq *gneq; 445 struct dasd_gneq *gneq;
443 446
444 struct dasd_eckd_path path_data;
445 struct eckd_count count_area[5]; 447 struct eckd_count count_area[5];
446 int init_cqr_status; 448 int init_cqr_status;
447 int uses_cdl; 449 int uses_cdl;
@@ -454,6 +456,8 @@ struct dasd_eckd_private {
454 struct alias_pav_group *pavgroup; 456 struct alias_pav_group *pavgroup;
455 struct alias_lcu *lcu; 457 struct alias_lcu *lcu;
456 int count; 458 int count;
459
460 u32 fcx_max_data;
457}; 461};
458 462
459 463

diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 7158f9528ecc..77f778b7b070 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -17,7 +17,6 @@
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/poll.h> 18#include <linux/poll.h>
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/smp_lock.h>
21#include <linux/err.h> 20#include <linux/err.h>
22#include <linux/slab.h> 21#include <linux/slab.h>
23 22
@@ -474,6 +473,7 @@ int dasd_eer_enable(struct dasd_device *device)
474 cqr->retries = 255; 473 cqr->retries = 255;
475 cqr->expires = 10 * HZ; 474 cqr->expires = 10 * HZ;
476 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 475 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
476 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
477 477
478 ccw = cqr->cpaddr; 478 ccw = cqr->cpaddr;
479 ccw->cmd_code = DASD_ECKD_CCW_SNSS; 479 ccw->cmd_code = DASD_ECKD_CCW_SNSS;
@@ -670,6 +670,7 @@ static const struct file_operations dasd_eer_fops = {
670 .read = &dasd_eer_read, 670 .read = &dasd_eer_read,
671 .poll = &dasd_eer_poll, 671 .poll = &dasd_eer_poll,
672 .owner = THIS_MODULE, 672 .owner = THIS_MODULE,
673 .llseek = noop_llseek,
673}; 674};
674 675
675static struct miscdevice *dasd_eer_dev = NULL; 676static struct miscdevice *dasd_eer_dev = NULL;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 7656384a811d..0eafe2e421e7 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
96 DBF_DEV_EVENT(DBF_DEBUG, device, 96 DBF_DEV_EVENT(DBF_DEBUG, device,
97 "default ERP called (%i retries left)", 97 "default ERP called (%i retries left)",
98 cqr->retries); 98 cqr->retries);
99 cqr->lpm = LPM_ANYPATH; 99 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
100 cqr->lpm = device->path_data.opm;
100 cqr->status = DASD_CQR_FILLED; 101 cqr->status = DASD_CQR_FILLED;
101 } else { 102 } else {
102 pr_err("%s: default ERP has run out of retries and failed\n", 103 pr_err("%s: default ERP has run out of retries and failed\n",
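
The ERP change above replaces the old "retry on any path" reset with "retry on all currently operational paths", and deliberately skips path-verification requests, which must stay pinned to the single path they are probing. A compact illustration of that policy in generic C (a mask stands in for the kernel's bit-number flag):

    #define CQR_VERIFY_PATH 0x1  /* illustrative mask, not the kernel bit */

    struct cqr_like { unsigned int flags; unsigned char lpm; };
    struct dev_like { unsigned char opm; };  /* operational-path mask */

    /* On retry, widen a normal request to every operational path, but
     * leave a path-verification request pinned to its single path. */
    static void reset_retry_paths(struct cqr_like *cqr,
                                  const struct dev_like *dev)
    {
        if (!(cqr->flags & CQR_VERIFY_PATH))
            cqr->lpm = dev->opm;
    }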
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index bec5486e0e6d..4b71b1164868 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -65,14 +65,17 @@ dasd_fba_set_online(struct ccw_device *cdev)
65} 65}
66 66
67static struct ccw_driver dasd_fba_driver = { 67static struct ccw_driver dasd_fba_driver = {
68 .name = "dasd-fba", 68 .driver = {
69 .owner = THIS_MODULE, 69 .name = "dasd-fba",
70 .owner = THIS_MODULE,
71 },
70 .ids = dasd_fba_ids, 72 .ids = dasd_fba_ids,
71 .probe = dasd_fba_probe, 73 .probe = dasd_fba_probe,
72 .remove = dasd_generic_remove, 74 .remove = dasd_generic_remove,
73 .set_offline = dasd_generic_set_offline, 75 .set_offline = dasd_generic_set_offline,
74 .set_online = dasd_fba_set_online, 76 .set_online = dasd_fba_set_online,
75 .notify = dasd_generic_notify, 77 .notify = dasd_generic_notify,
78 .path_event = dasd_generic_path_event,
76 .freeze = dasd_generic_pm_freeze, 79 .freeze = dasd_generic_pm_freeze,
77 .thaw = dasd_generic_restore_device, 80 .thaw = dasd_generic_restore_device,
78 .restore = dasd_generic_restore_device, 81 .restore = dasd_generic_restore_device,
@@ -164,6 +167,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
164 } 167 }
165 168
166 device->default_expires = DASD_EXPIRES; 169 device->default_expires = DASD_EXPIRES;
170 device->path_data.opm = LPM_ANYPATH;
167 171
168 readonly = dasd_device_is_ro(device); 172 readonly = dasd_device_is_ro(device);
169 if (readonly) 173 if (readonly)
@@ -231,24 +235,16 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
231 return NULL; 235 return NULL;
232} 236}
233 237
234static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, 238static void dasd_fba_check_for_device_change(struct dasd_device *device,
235 struct irb *irb) 239 struct dasd_ccw_req *cqr,
240 struct irb *irb)
236{ 241{
237 char mask; 242 char mask;
238 243
239 /* first of all check for state change pending interrupt */ 244 /* first of all check for state change pending interrupt */
240 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 245 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
241 if ((irb->scsw.cmd.dstat & mask) == mask) { 246 if ((irb->scsw.cmd.dstat & mask) == mask)
242 dasd_generic_handle_state_change(device); 247 dasd_generic_handle_state_change(device);
243 return;
244 }
245
246 /* check for unsolicited interrupts */
247 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
248 "unsolicited interrupt received");
249 device->discipline->dump_sense_dbf(device, irb, "unsolicited");
250 dasd_schedule_device_bh(device);
251 return;
252}; 248};
253 249
254static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, 250static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
@@ -596,13 +592,14 @@ static struct dasd_discipline dasd_fba_discipline = {
596 .max_blocks = 96, 592 .max_blocks = 96,
597 .check_device = dasd_fba_check_characteristics, 593 .check_device = dasd_fba_check_characteristics,
598 .do_analysis = dasd_fba_do_analysis, 594 .do_analysis = dasd_fba_do_analysis,
595 .verify_path = dasd_generic_verify_path,
599 .fill_geometry = dasd_fba_fill_geometry, 596 .fill_geometry = dasd_fba_fill_geometry,
600 .start_IO = dasd_start_IO, 597 .start_IO = dasd_start_IO,
601 .term_IO = dasd_term_IO, 598 .term_IO = dasd_term_IO,
602 .handle_terminated_request = dasd_fba_handle_terminated_request, 599 .handle_terminated_request = dasd_fba_handle_terminated_request,
603 .erp_action = dasd_fba_erp_action, 600 .erp_action = dasd_fba_erp_action,
604 .erp_postaction = dasd_fba_erp_postaction, 601 .erp_postaction = dasd_fba_erp_postaction,
605 .handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt, 602 .check_for_device_change = dasd_fba_check_for_device_change,
606 .build_cp = dasd_fba_build_cp, 603 .build_cp = dasd_fba_build_cp,
607 .free_cp = dasd_fba_free_cp, 604 .free_cp = dasd_fba_free_cp,
608 .dump_sense = dasd_fba_dump_sense, 605 .dump_sense = dasd_fba_dump_sense,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 30a1ca3d08b7..19a1ff03d65e 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -73,7 +73,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
73 if (base->features & DASD_FEATURE_READONLY || 73 if (base->features & DASD_FEATURE_READONLY ||
74 test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) 74 test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
75 set_disk_ro(gdp, 1); 75 set_disk_ro(gdp, 1);
76 gdp->private_data = block; 76 dasd_add_link_to_gendisk(gdp, base);
77 gdp->queue = block->request_queue; 77 gdp->queue = block->request_queue;
78 block->gdp = gdp; 78 block->gdp = gdp;
79 set_capacity(block->gdp, 0); 79 set_capacity(block->gdp, 0);
@@ -103,7 +103,7 @@ int dasd_scan_partitions(struct dasd_block *block)
103 struct block_device *bdev; 103 struct block_device *bdev;
104 104
105 bdev = bdget_disk(block->gdp, 0); 105 bdev = bdget_disk(block->gdp, 0);
106 if (!bdev || blkdev_get(bdev, FMODE_READ) < 0) 106 if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
107 return -ENODEV; 107 return -ENODEV;
108 /* 108 /*
109 * See fs/partition/check.c:register_disk,rescan_partitions 109 * See fs/partition/check.c:register_disk,rescan_partitions
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 500678d7116c..d1e4f2c1264c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -231,6 +231,11 @@ struct dasd_ccw_req {
231/* per dasd_ccw_req flags */ 231/* per dasd_ccw_req flags */
232#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ 232#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
233#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */ 233#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
234#define DASD_CQR_VERIFY_PATH 2 /* path verification request */
235#define DASD_CQR_ALLOW_SLOCK 3 /* Try this request even when lock was
236 * stolen. Should not be combined with
237 * DASD_CQR_FLAGS_USE_ERP
238 */
234 239
235/* Signature for error recovery functions. */ 240/* Signature for error recovery functions. */
236typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); 241typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
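
Note that DASD_CQR_VERIFY_PATH and DASD_CQR_ALLOW_SLOCK, like the existing request flags, are bit numbers for set_bit()/test_bit(), not bitmasks. The distinction, spelled out in plain C rather than the kernel bitops:

    #include <stdio.h>

    #define DASD_CQR_FLAGS_FAILFAST 1  /* bit number, not a 0x1 mask */
    #define DASD_CQR_ALLOW_SLOCK    3

    static unsigned long flags;

    int main(void)
    {
        flags |= 1UL << DASD_CQR_ALLOW_SLOCK;  /* like set_bit(3, &flags) */
        if (flags & (1UL << DASD_CQR_FLAGS_FAILFAST))
            printf("failfast requested\n");
        printf("flags word: %#lx\n", flags);   /* prints 0x8 */
        return 0;
    }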
@@ -287,6 +292,14 @@ struct dasd_discipline {
287 int (*do_analysis) (struct dasd_block *); 292 int (*do_analysis) (struct dasd_block *);
288 293
289 /* 294 /*
 295 * This function is called when new paths become available.
 296 * Disciplines may use this callback to do necessary setup work,
 297 * e.g. verify that a new path is compatible with the current
298 * configuration.
299 */
300 int (*verify_path)(struct dasd_device *, __u8);
301
302 /*
290 * Last things to do when a device is set online, and first things 303 * Last things to do when a device is set online, and first things
291 * when it is set offline. 304 * when it is set offline.
292 */ 305 */
@@ -325,9 +338,9 @@ struct dasd_discipline {
325 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, 338 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
326 struct irb *); 339 struct irb *);
327 void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *); 340 void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
328 341 void (*check_for_device_change) (struct dasd_device *,
329 void (*handle_unsolicited_interrupt) (struct dasd_device *, 342 struct dasd_ccw_req *,
330 struct irb *); 343 struct irb *);
331 344
332 /* i/o control functions. */ 345 /* i/o control functions. */
333 int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); 346 int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
@@ -362,6 +375,13 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
362#define DASD_EER_STATECHANGE 3 375#define DASD_EER_STATECHANGE 3
363#define DASD_EER_PPRCSUSPEND 4 376#define DASD_EER_PPRCSUSPEND 4
364 377
378struct dasd_path {
379 __u8 opm;
380 __u8 tbvpm;
381 __u8 ppm;
382 __u8 npm;
383};
384
365struct dasd_device { 385struct dasd_device {
366 /* Block device stuff. */ 386 /* Block device stuff. */
367 struct dasd_block *block; 387 struct dasd_block *block;
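
struct dasd_path replaces the ECKD-private dasd_eckd_path (removed from dasd_eckd.h above) and adds tbvpm, the "to be verified" path mask. Each field is an 8-bit mask with one bit per channel path. A hedged example of the mask arithmetic this enables, with invented values:

    #include <stdio.h>

    struct dasd_path_like {
        unsigned char opm;    /* operational path mask */
        unsigned char tbvpm;  /* paths awaiting verification */
        unsigned char ppm;    /* preferred paths */
        unsigned char npm;    /* non-preferred paths */
    };

    int main(void)
    {
        struct dasd_path_like p = { .opm = 0xC0, .tbvpm = 0x20 };

        /* A new path shows up: queue it for verification. */
        p.tbvpm |= 0x10;
        /* Verification of path 0x20 succeeded: promote it. */
        p.opm |= 0x20;
        p.tbvpm &= ~0x20;
        printf("opm=%#x tbvpm=%#x\n", p.opm, p.tbvpm);  /* 0xe0 0x10 */
        return 0;
    }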
@@ -377,6 +397,7 @@ struct dasd_device {
377 struct dasd_discipline *discipline; 397 struct dasd_discipline *discipline;
378 struct dasd_discipline *base_discipline; 398 struct dasd_discipline *base_discipline;
379 char *private; 399 char *private;
400 struct dasd_path path_data;
380 401
381 /* Device state and target state. */ 402 /* Device state and target state. */
382 int state, target; 403 int state, target;
@@ -456,6 +477,9 @@ struct dasd_block {
456 * confuse this with the user specified 477 * confuse this with the user specified
457 * read-only feature. 478 * read-only feature.
458 */ 479 */
480#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
481#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
482
459 483
460void dasd_put_device_wake(struct dasd_device *); 484void dasd_put_device_wake(struct dasd_device *);
461 485
@@ -620,10 +644,15 @@ void dasd_generic_remove (struct ccw_device *cdev);
620int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); 644int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
621int dasd_generic_set_offline (struct ccw_device *cdev); 645int dasd_generic_set_offline (struct ccw_device *cdev);
622int dasd_generic_notify(struct ccw_device *, int); 646int dasd_generic_notify(struct ccw_device *, int);
647int dasd_generic_last_path_gone(struct dasd_device *);
648int dasd_generic_path_operational(struct dasd_device *);
649
623void dasd_generic_handle_state_change(struct dasd_device *); 650void dasd_generic_handle_state_change(struct dasd_device *);
624int dasd_generic_pm_freeze(struct ccw_device *); 651int dasd_generic_pm_freeze(struct ccw_device *);
625int dasd_generic_restore_device(struct ccw_device *); 652int dasd_generic_restore_device(struct ccw_device *);
626enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); 653enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
654void dasd_generic_path_event(struct ccw_device *, int *);
655int dasd_generic_verify_path(struct dasd_device *, __u8);
627 656
628int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 657int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
629char *dasd_get_sense(struct irb *); 658char *dasd_get_sense(struct irb *);
@@ -657,6 +686,9 @@ struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
657struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *); 686struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
658struct dasd_device *dasd_device_from_devindex(int); 687struct dasd_device *dasd_device_from_devindex(int);
659 688
689void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
690struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
691
660int dasd_parse(void); 692int dasd_parse(void);
661int dasd_busid_known(const char *); 693int dasd_busid_known(const char *);
662 694
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 1557214944f7..72261e4c516d 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -16,7 +16,6 @@
16#include <linux/major.h> 16#include <linux/major.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/blkpg.h> 18#include <linux/blkpg.h>
19#include <linux/smp_lock.h>
20#include <linux/slab.h> 19#include <linux/slab.h>
21#include <asm/compat.h> 20#include <asm/compat.h>
22#include <asm/ccwdev.h> 21#include <asm/ccwdev.h>
@@ -43,16 +42,22 @@ dasd_ioctl_api_version(void __user *argp)
43static int 42static int
44dasd_ioctl_enable(struct block_device *bdev) 43dasd_ioctl_enable(struct block_device *bdev)
45{ 44{
46 struct dasd_block *block = bdev->bd_disk->private_data; 45 struct dasd_device *base;
47 46
48 if (!capable(CAP_SYS_ADMIN)) 47 if (!capable(CAP_SYS_ADMIN))
49 return -EACCES; 48 return -EACCES;
50 49
51 dasd_enable_device(block->base); 50 base = dasd_device_from_gendisk(bdev->bd_disk);
51 if (!base)
52 return -ENODEV;
53
54 dasd_enable_device(base);
52 /* Formatting the dasd device can change the capacity. */ 55 /* Formatting the dasd device can change the capacity. */
53 mutex_lock(&bdev->bd_mutex); 56 mutex_lock(&bdev->bd_mutex);
54 i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9); 57 i_size_write(bdev->bd_inode,
58 (loff_t)get_capacity(base->block->gdp) << 9);
55 mutex_unlock(&bdev->bd_mutex); 59 mutex_unlock(&bdev->bd_mutex);
60 dasd_put_device(base);
56 return 0; 61 return 0;
57} 62}
58 63
@@ -63,11 +68,14 @@ dasd_ioctl_enable(struct block_device *bdev)
63static int 68static int
64dasd_ioctl_disable(struct block_device *bdev) 69dasd_ioctl_disable(struct block_device *bdev)
65{ 70{
66 struct dasd_block *block = bdev->bd_disk->private_data; 71 struct dasd_device *base;
67 72
68 if (!capable(CAP_SYS_ADMIN)) 73 if (!capable(CAP_SYS_ADMIN))
69 return -EACCES; 74 return -EACCES;
70 75
76 base = dasd_device_from_gendisk(bdev->bd_disk);
77 if (!base)
78 return -ENODEV;
71 /* 79 /*
72 * Man this is sick. We don't do a real disable but only downgrade 80 * Man this is sick. We don't do a real disable but only downgrade
73 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses 81 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
@@ -76,7 +84,7 @@ dasd_ioctl_disable(struct block_device *bdev)
76 * using the BIODASDFMT ioctl. Therefore the correct state for the 84 * using the BIODASDFMT ioctl. Therefore the correct state for the
77 * device is DASD_STATE_BASIC that allows to do basic i/o. 85 * device is DASD_STATE_BASIC that allows to do basic i/o.
78 */ 86 */
79 dasd_set_target_state(block->base, DASD_STATE_BASIC); 87 dasd_set_target_state(base, DASD_STATE_BASIC);
80 /* 88 /*
81 * Set i_size to zero, since read, write, etc. check against this 89 * Set i_size to zero, since read, write, etc. check against this
82 * value. 90 * value.
@@ -84,6 +92,7 @@ dasd_ioctl_disable(struct block_device *bdev)
84 mutex_lock(&bdev->bd_mutex); 92 mutex_lock(&bdev->bd_mutex);
85 i_size_write(bdev->bd_inode, 0); 93 i_size_write(bdev->bd_inode, 0);
86 mutex_unlock(&bdev->bd_mutex); 94 mutex_unlock(&bdev->bd_mutex);
95 dasd_put_device(base);
87 return 0; 96 return 0;
88} 97}
89 98
@@ -192,26 +201,36 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
192static int 201static int
193dasd_ioctl_format(struct block_device *bdev, void __user *argp) 202dasd_ioctl_format(struct block_device *bdev, void __user *argp)
194{ 203{
195 struct dasd_block *block = bdev->bd_disk->private_data; 204 struct dasd_device *base;
196 struct format_data_t fdata; 205 struct format_data_t fdata;
206 int rc;
197 207
198 if (!capable(CAP_SYS_ADMIN)) 208 if (!capable(CAP_SYS_ADMIN))
199 return -EACCES; 209 return -EACCES;
200 if (!argp) 210 if (!argp)
201 return -EINVAL; 211 return -EINVAL;
202 212 base = dasd_device_from_gendisk(bdev->bd_disk);
203 if (block->base->features & DASD_FEATURE_READONLY || 213 if (!base)
204 test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) 214 return -ENODEV;
215 if (base->features & DASD_FEATURE_READONLY ||
216 test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
217 dasd_put_device(base);
205 return -EROFS; 218 return -EROFS;
206 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 219 }
220 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) {
221 dasd_put_device(base);
207 return -EFAULT; 222 return -EFAULT;
223 }
208 if (bdev != bdev->bd_contains) { 224 if (bdev != bdev->bd_contains) {
209 pr_warning("%s: The specified DASD is a partition and cannot " 225 pr_warning("%s: The specified DASD is a partition and cannot "
210 "be formatted\n", 226 "be formatted\n",
211 dev_name(&block->base->cdev->dev)); 227 dev_name(&base->cdev->dev));
228 dasd_put_device(base);
212 return -EINVAL; 229 return -EINVAL;
213 } 230 }
214 return dasd_format(block, &fdata); 231 rc = dasd_format(base->block, &fdata);
232 dasd_put_device(base);
233 return rc;
215} 234}
216 235
217#ifdef CONFIG_DASD_PROFILE 236#ifdef CONFIG_DASD_PROFILE
@@ -341,8 +360,8 @@ static int dasd_ioctl_information(struct dasd_block *block,
341static int 360static int
342dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) 361dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
343{ 362{
344 struct dasd_block *block = bdev->bd_disk->private_data; 363 struct dasd_device *base;
345 int intval; 364 int intval, rc;
346 365
347 if (!capable(CAP_SYS_ADMIN)) 366 if (!capable(CAP_SYS_ADMIN))
348 return -EACCES; 367 return -EACCES;
@@ -351,10 +370,17 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
351 return -EINVAL; 370 return -EINVAL;
352 if (get_user(intval, (int __user *)argp)) 371 if (get_user(intval, (int __user *)argp))
353 return -EFAULT; 372 return -EFAULT;
354 if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) 373 base = dasd_device_from_gendisk(bdev->bd_disk);
374 if (!base)
375 return -ENODEV;
376 if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
377 dasd_put_device(base);
355 return -EROFS; 378 return -EROFS;
379 }
356 set_disk_ro(bdev->bd_disk, intval); 380 set_disk_ro(bdev->bd_disk, intval);
357 return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval); 381 rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval);
382 dasd_put_device(base);
383 return rc;
358} 384}
359 385
360static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, 386static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
@@ -370,74 +396,81 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
370 return ret; 396 return ret;
371} 397}
372 398
373static int 399int dasd_ioctl(struct block_device *bdev, fmode_t mode,
374dasd_do_ioctl(struct block_device *bdev, fmode_t mode, 400 unsigned int cmd, unsigned long arg)
375 unsigned int cmd, unsigned long arg)
376{ 401{
377 struct dasd_block *block = bdev->bd_disk->private_data; 402 struct dasd_block *block;
403 struct dasd_device *base;
378 void __user *argp; 404 void __user *argp;
405 int rc;
379 406
380 if (is_compat_task()) 407 if (is_compat_task())
381 argp = compat_ptr(arg); 408 argp = compat_ptr(arg);
382 else 409 else
383 argp = (void __user *)arg; 410 argp = (void __user *)arg;
384 411
385 if (!block)
386 return -ENODEV;
387
388 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { 412 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
389 PRINT_DEBUG("empty data ptr"); 413 PRINT_DEBUG("empty data ptr");
390 return -EINVAL; 414 return -EINVAL;
391 } 415 }
392 416
417 base = dasd_device_from_gendisk(bdev->bd_disk);
418 if (!base)
419 return -ENODEV;
420 block = base->block;
421 rc = 0;
393 switch (cmd) { 422 switch (cmd) {
394 case BIODASDDISABLE: 423 case BIODASDDISABLE:
395 return dasd_ioctl_disable(bdev); 424 rc = dasd_ioctl_disable(bdev);
425 break;
396 case BIODASDENABLE: 426 case BIODASDENABLE:
397 return dasd_ioctl_enable(bdev); 427 rc = dasd_ioctl_enable(bdev);
428 break;
398 case BIODASDQUIESCE: 429 case BIODASDQUIESCE:
399 return dasd_ioctl_quiesce(block); 430 rc = dasd_ioctl_quiesce(block);
431 break;
400 case BIODASDRESUME: 432 case BIODASDRESUME:
401 return dasd_ioctl_resume(block); 433 rc = dasd_ioctl_resume(block);
434 break;
402 case BIODASDFMT: 435 case BIODASDFMT:
403 return dasd_ioctl_format(bdev, argp); 436 rc = dasd_ioctl_format(bdev, argp);
437 break;
404 case BIODASDINFO: 438 case BIODASDINFO:
405 return dasd_ioctl_information(block, cmd, argp); 439 rc = dasd_ioctl_information(block, cmd, argp);
440 break;
406 case BIODASDINFO2: 441 case BIODASDINFO2:
407 return dasd_ioctl_information(block, cmd, argp); 442 rc = dasd_ioctl_information(block, cmd, argp);
443 break;
408 case BIODASDPRRD: 444 case BIODASDPRRD:
409 return dasd_ioctl_read_profile(block, argp); 445 rc = dasd_ioctl_read_profile(block, argp);
446 break;
410 case BIODASDPRRST: 447 case BIODASDPRRST:
411 return dasd_ioctl_reset_profile(block); 448 rc = dasd_ioctl_reset_profile(block);
449 break;
412 case BLKROSET: 450 case BLKROSET:
413 return dasd_ioctl_set_ro(bdev, argp); 451 rc = dasd_ioctl_set_ro(bdev, argp);
452 break;
414 case DASDAPIVER: 453 case DASDAPIVER:
415 return dasd_ioctl_api_version(argp); 454 rc = dasd_ioctl_api_version(argp);
455 break;
416 case BIODASDCMFENABLE: 456 case BIODASDCMFENABLE:
417 return enable_cmf(block->base->cdev); 457 rc = enable_cmf(base->cdev);
458 break;
418 case BIODASDCMFDISABLE: 459 case BIODASDCMFDISABLE:
419 return disable_cmf(block->base->cdev); 460 rc = disable_cmf(base->cdev);
461 break;
420 case BIODASDREADALLCMB: 462 case BIODASDREADALLCMB:
421 return dasd_ioctl_readall_cmb(block, cmd, argp); 463 rc = dasd_ioctl_readall_cmb(block, cmd, argp);
464 break;
422 default: 465 default:
423 /* if the discipline has an ioctl method try it. */ 466 /* if the discipline has an ioctl method try it. */
424 if (block->base->discipline->ioctl) { 467 if (base->discipline->ioctl) {
425 int rval = block->base->discipline->ioctl(block, cmd, argp); 468 rc = base->discipline->ioctl(block, cmd, argp);
426 if (rval != -ENOIOCTLCMD) 469 if (rc == -ENOIOCTLCMD)
427 return rval; 470 rc = -EINVAL;
428 } 471 } else
429 472 rc = -EINVAL;
430 return -EINVAL;
431 } 473 }
432} 474 dasd_put_device(base);
433
434int dasd_ioctl(struct block_device *bdev, fmode_t mode,
435 unsigned int cmd, unsigned long arg)
436{
437 int rc;
438
439 lock_kernel();
440 rc = dasd_do_ioctl(bdev, mode, cmd, arg);
441 unlock_kernel();
442 return rc; 475 return rc;
443} 476}
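
The ioctl rework above swaps per-case returns for a single exit point so that the reference taken by dasd_device_from_gendisk() is dropped exactly once, and it removes the last big-kernel-lock user in this path. The shape of that pattern, reduced to standalone C; dev_get()/dev_put() are illustrative stand-ins for the refcounting:

    struct dev { int refcount; };

    static struct dev *dev_get(struct dev *d)
    {
        if (d)
            d->refcount++;
        return d;
    }

    static void dev_put(struct dev *d)
    {
        if (d)
            d->refcount--;
    }

    /* Single-exit ioctl shape: take the reference once, let every
     * handler set rc, release the reference exactly once at the end. */
    int example_ioctl(struct dev *d, unsigned int cmd)
    {
        int rc;

        d = dev_get(d);
        if (!d)
            return -1;  /* -ENODEV in the real code */
        switch (cmd) {
        case 1:
            rc = 0;     /* handler A */
            break;
        default:
            rc = -22;   /* -EINVAL */
        }
        dev_put(d);
        return rc;
    }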
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 2eb025592809..c4a6a31bd9cd 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -251,7 +251,6 @@ static ssize_t dasd_stats_proc_write(struct file *file,
251 buffer = dasd_get_user_string(user_buf, user_len); 251 buffer = dasd_get_user_string(user_buf, user_len);
252 if (IS_ERR(buffer)) 252 if (IS_ERR(buffer))
253 return PTR_ERR(buffer); 253 return PTR_ERR(buffer);
254 DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer);
255 254
256 /* check for valid verbs */ 255 /* check for valid verbs */
257 str = skip_spaces(buffer); 256 str = skip_spaces(buffer);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 2bd72aa34c59..9b43ae94beba 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -14,7 +14,6 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/blkdev.h> 16#include <linux/blkdev.h>
17#include <linux/smp_lock.h>
18#include <linux/completion.h> 17#include <linux/completion.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
@@ -776,7 +775,6 @@ dcssblk_open(struct block_device *bdev, fmode_t mode)
776 struct dcssblk_dev_info *dev_info; 775 struct dcssblk_dev_info *dev_info;
777 int rc; 776 int rc;
778 777
779 lock_kernel();
780 dev_info = bdev->bd_disk->private_data; 778 dev_info = bdev->bd_disk->private_data;
781 if (NULL == dev_info) { 779 if (NULL == dev_info) {
782 rc = -ENODEV; 780 rc = -ENODEV;
@@ -786,7 +784,6 @@ dcssblk_open(struct block_device *bdev, fmode_t mode)
786 bdev->bd_block_size = 4096; 784 bdev->bd_block_size = 4096;
787 rc = 0; 785 rc = 0;
788out: 786out:
789 unlock_kernel();
790 return rc; 787 return rc;
791} 788}
792 789
@@ -797,7 +794,6 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
797 struct segment_info *entry; 794 struct segment_info *entry;
798 int rc; 795 int rc;
799 796
800 lock_kernel();
801 if (!dev_info) { 797 if (!dev_info) {
802 rc = -ENODEV; 798 rc = -ENODEV;
803 goto out; 799 goto out;
@@ -815,7 +811,6 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
815 up_write(&dcssblk_devices_sem); 811 up_write(&dcssblk_devices_sem);
816 rc = 0; 812 rc = 0;
817out: 813out:
818 unlock_kernel();
819 return rc; 814 return rc;
820} 815}
821 816
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c881a14fa5dd..1f6a4d894e73 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -62,8 +62,8 @@ static int xpram_devs;
62/* 62/*
63 * Parameter parsing functions. 63 * Parameter parsing functions.
64 */ 64 */
65static int __initdata devs = XPRAM_DEVS; 65static int devs = XPRAM_DEVS;
66static char __initdata *sizes[XPRAM_MAX_DEVS]; 66static char *sizes[XPRAM_MAX_DEVS];
67 67
68module_param(devs, int, 0); 68module_param(devs, int, 0);
69module_param_array(sizes, charp, NULL, 0); 69module_param_array(sizes, charp, NULL, 0);
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 40834f18754c..a4f117d9fdc6 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -2,76 +2,85 @@ comment "S/390 character device drivers"
2 depends on S390 2 depends on S390
3 3
4config TN3270 4config TN3270
5 tristate "Support for locally attached 3270 terminals" 5 def_tristate y
6 prompt "Support for locally attached 3270 terminals"
6 depends on CCW 7 depends on CCW
7 help 8 help
8 Include support for IBM 3270 terminals. 9 Include support for IBM 3270 terminals.
9 10
10config TN3270_TTY 11config TN3270_TTY
11 tristate "Support for tty input/output on 3270 terminals" 12 def_tristate y
13 prompt "Support for tty input/output on 3270 terminals"
12 depends on TN3270 14 depends on TN3270
13 help 15 help
14 Include support for using an IBM 3270 terminal as a Linux tty. 16 Include support for using an IBM 3270 terminal as a Linux tty.
15 17
16config TN3270_FS 18config TN3270_FS
17 tristate "Support for fullscreen applications on 3270 terminals" 19 def_tristate m
20 prompt "Support for fullscreen applications on 3270 terminals"
18 depends on TN3270 21 depends on TN3270
19 help 22 help
20 Include support for fullscreen applications on an IBM 3270 terminal. 23 Include support for fullscreen applications on an IBM 3270 terminal.
21 24
22config TN3270_CONSOLE 25config TN3270_CONSOLE
23 bool "Support for console on 3270 terminal" 26 def_bool y
27 prompt "Support for console on 3270 terminal"
24 depends on TN3270=y && TN3270_TTY=y 28 depends on TN3270=y && TN3270_TTY=y
25 help 29 help
26 Include support for using an IBM 3270 terminal as a Linux system 30 Include support for using an IBM 3270 terminal as a Linux system
27 console. Available only if 3270 support is compiled in statically. 31 console. Available only if 3270 support is compiled in statically.
28 32
29config TN3215 33config TN3215
30 bool "Support for 3215 line mode terminal" 34 def_bool y
35 prompt "Support for 3215 line mode terminal"
31 depends on CCW 36 depends on CCW
32 help 37 help
33 Include support for IBM 3215 line-mode terminals. 38 Include support for IBM 3215 line-mode terminals.
34 39
35config TN3215_CONSOLE 40config TN3215_CONSOLE
36 bool "Support for console on 3215 line mode terminal" 41 def_bool y
42 prompt "Support for console on 3215 line mode terminal"
37 depends on TN3215 43 depends on TN3215
38 help 44 help
39 Include support for using an IBM 3215 line-mode terminal as a 45 Include support for using an IBM 3215 line-mode terminal as a
40 Linux system console. 46 Linux system console.
41 47
42config CCW_CONSOLE 48config CCW_CONSOLE
43 bool 49 def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
44 depends on TN3215_CONSOLE || TN3270_CONSOLE
45 default y
46 50
47config SCLP_TTY 51config SCLP_TTY
48 bool "Support for SCLP line mode terminal" 52 def_bool y
53 prompt "Support for SCLP line mode terminal"
49 depends on S390 54 depends on S390
50 help 55 help
51 Include support for IBM SCLP line-mode terminals. 56 Include support for IBM SCLP line-mode terminals.
52 57
53config SCLP_CONSOLE 58config SCLP_CONSOLE
54 bool "Support for console on SCLP line mode terminal" 59 def_bool y
60 prompt "Support for console on SCLP line mode terminal"
55 depends on SCLP_TTY 61 depends on SCLP_TTY
56 help 62 help
57 Include support for using an IBM HWC line-mode terminal as the Linux 63 Include support for using an IBM HWC line-mode terminal as the Linux
58 system console. 64 system console.
59 65
60config SCLP_VT220_TTY 66config SCLP_VT220_TTY
61 bool "Support for SCLP VT220-compatible terminal" 67 def_bool y
68 prompt "Support for SCLP VT220-compatible terminal"
62 depends on S390 69 depends on S390
63 help 70 help
64 Include support for an IBM SCLP VT220-compatible terminal. 71 Include support for an IBM SCLP VT220-compatible terminal.
65 72
66config SCLP_VT220_CONSOLE 73config SCLP_VT220_CONSOLE
67 bool "Support for console on SCLP VT220-compatible terminal" 74 def_bool y
75 prompt "Support for console on SCLP VT220-compatible terminal"
68 depends on SCLP_VT220_TTY 76 depends on SCLP_VT220_TTY
69 help 77 help
70 Include support for using an IBM SCLP VT220-compatible terminal as a 78 Include support for using an IBM SCLP VT220-compatible terminal as a
71 Linux system console. 79 Linux system console.
72 80
73config SCLP_CPI 81config SCLP_CPI
74 tristate "Control-Program Identification" 82 def_tristate m
83 prompt "Control-Program Identification"
75 depends on S390 84 depends on S390
76 help 85 help
77 This option enables the hardware console interface for system 86 This option enables the hardware console interface for system
@@ -83,7 +92,8 @@ config SCLP_CPI
83 need this feature and intend to run your kernel in LPAR. 92 need this feature and intend to run your kernel in LPAR.
84 93
85config SCLP_ASYNC 94config SCLP_ASYNC
86 tristate "Support for Call Home via Asynchronous SCLP Records" 95 def_tristate m
96 prompt "Support for Call Home via Asynchronous SCLP Records"
87 depends on S390 97 depends on S390
88 help 98 help
89 This option enables the call home function, which is able to inform 99 This option enables the call home function, which is able to inform
@@ -93,7 +103,8 @@ config SCLP_ASYNC
93 need this feature and intend to run your kernel in LPAR. 103 need this feature and intend to run your kernel in LPAR.
94 104
95config S390_TAPE 105config S390_TAPE
96 tristate "S/390 tape device support" 106 def_tristate m
107 prompt "S/390 tape device support"
97 depends on CCW 108 depends on CCW
98 help 109 help
99 Select this option if you want to access channel-attached tape 110 Select this option if you want to access channel-attached tape
@@ -108,22 +119,12 @@ config S390_TAPE
108comment "S/390 tape interface support" 119comment "S/390 tape interface support"
109 depends on S390_TAPE 120 depends on S390_TAPE
110 121
111config S390_TAPE_BLOCK
112 bool "Support for tape block devices"
113 depends on S390_TAPE && BLOCK
114 help
115 Select this option if you want to access your channel-attached tape
116 devices using the block device interface. This interface is similar
117 to CD-ROM devices on other platforms. The tapes can only be
118 accessed read-only when using this interface. Have a look at
119 <file:Documentation/s390/TAPE> for further information about creating
120 volumes for and using this interface. It is safe to say "Y" here.
121
122comment "S/390 tape hardware support" 122comment "S/390 tape hardware support"
123 depends on S390_TAPE 123 depends on S390_TAPE
124 124
125config S390_TAPE_34XX 125config S390_TAPE_34XX
126 tristate "Support for 3480/3490 tape hardware" 126 def_tristate m
127 prompt "Support for 3480/3490 tape hardware"
127 depends on S390_TAPE 128 depends on S390_TAPE
128 help 129 help
129 Select this option if you want to access IBM 3480/3490 magnetic 130 Select this option if you want to access IBM 3480/3490 magnetic
@@ -131,7 +132,8 @@ config S390_TAPE_34XX
131 It is safe to say "Y" here. 132 It is safe to say "Y" here.
132 133
133config S390_TAPE_3590 134config S390_TAPE_3590
134 tristate "Support for 3590 tape hardware" 135 def_tristate m
136 prompt "Support for 3590 tape hardware"
135 depends on S390_TAPE 137 depends on S390_TAPE
136 help 138 help
137 Select this option if you want to access IBM 3590 magnetic 139 Select this option if you want to access IBM 3590 magnetic
@@ -139,7 +141,8 @@ config S390_TAPE_3590
139 It is safe to say "Y" here. 141 It is safe to say "Y" here.
140 142
141config VMLOGRDR 143config VMLOGRDR
142 tristate "Support for the z/VM recording system services (VM only)" 144 def_tristate m
145 prompt "Support for the z/VM recording system services (VM only)"
143 depends on IUCV 146 depends on IUCV
144 help 147 help
145 Select this option if you want to be able to receive records collected 148 Select this option if you want to be able to receive records collected
@@ -148,29 +151,31 @@ config VMLOGRDR
148 This driver depends on the IUCV support driver. 151 This driver depends on the IUCV support driver.
149 152
150config VMCP 153config VMCP
151 bool "Support for the z/VM CP interface" 154 def_bool y
155 prompt "Support for the z/VM CP interface"
152 depends on S390 156 depends on S390
153 help 157 help
154 Select this option if you want to be able to interact with the control 158 Select this option if you want to be able to interact with the control
155 program on z/VM 159 program on z/VM
156 160
157config MONREADER 161config MONREADER
158 tristate "API for reading z/VM monitor service records" 162 def_tristate m
163 prompt "API for reading z/VM monitor service records"
159 depends on IUCV 164 depends on IUCV
160 help 165 help
161 Character device driver for reading z/VM monitor service records 166 Character device driver for reading z/VM monitor service records
162 167
163config MONWRITER 168config MONWRITER
164 tristate "API for writing z/VM monitor service records" 169 def_tristate m
170 prompt "API for writing z/VM monitor service records"
165 depends on S390 171 depends on S390
166 default "m"
167 help 172 help
168 Character device driver for writing z/VM monitor service records 173 Character device driver for writing z/VM monitor service records
169 174
170config S390_VMUR 175config S390_VMUR
171 tristate "z/VM unit record device driver" 176 def_tristate m
177 prompt "z/VM unit record device driver"
172 depends on S390 178 depends on S390
173 default "m"
174 help 179 help
175 Character device driver for z/VM reader, puncher and printer. 180 Character device driver for z/VM reader, puncher and printer.
176 181
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index efb500ab66c0..f3c325207445 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_cmd.o sclp_config.o sclp_cpi_sys.o 6 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o
7 7
8obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -22,7 +22,6 @@ obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
22obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o 22obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
23obj-$(CONFIG_VMCP) += vmcp.o 23obj-$(CONFIG_VMCP) += vmcp.o
24 24
25tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o
26tape-$(CONFIG_PROC_FS) += tape_proc.o 25tape-$(CONFIG_PROC_FS) += tape_proc.o
27tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y) 26tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
28obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o 27obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 59ec073724bf..694464c65fcd 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,6 +9,7 @@
9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> 9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
10 */ 10 */
11 11
12#include <linux/kernel_stat.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kdev_t.h> 15#include <linux/kdev_t.h>
@@ -361,6 +362,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
361 int cstat, dstat; 362 int cstat, dstat;
362 int count; 363 int count;
363 364
365 kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
364 raw = dev_get_drvdata(&cdev->dev); 366 raw = dev_get_drvdata(&cdev->dev);
365 req = (struct raw3215_req *) intparm; 367 req = (struct raw3215_req *) intparm;
366 cstat = irb->scsw.cmd.cstat; 368 cstat = irb->scsw.cmd.cstat;
@@ -762,8 +764,10 @@ static struct ccw_device_id raw3215_id[] = {
762}; 764};
763 765
764static struct ccw_driver raw3215_ccw_driver = { 766static struct ccw_driver raw3215_ccw_driver = {
765 .name = "3215", 767 .driver = {
766 .owner = THIS_MODULE, 768 .name = "3215",
769 .owner = THIS_MODULE,
770 },
767 .ids = raw3215_id, 771 .ids = raw3215_id,
768 .probe = &raw3215_probe, 772 .probe = &raw3215_probe,
769 .remove = &raw3215_remove, 773 .remove = &raw3215_remove,
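
The kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++ line added to raw3215_irq() (and its siblings in raw3270.c and sclp.c below) feeds per-CPU interrupt accounting: each handler bumps a counter indexed by CPU and interrupt class. Reduced to plain C, with invented array sizes and class index:

    #define NR_CPUS        4
    #define NR_IRQ_CLASSES 8
    #define IOINT_C15      2  /* illustrative class index */

    static unsigned long irq_count[NR_CPUS][NR_IRQ_CLASSES];

    static void irq_handler(int cpu)
    {
        irq_count[cpu][IOINT_C15]++;  /* account first, then handle */
        /* ... actual interrupt processing ... */
    }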
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 857dfcb7b359..f6489eb7e976 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -14,7 +14,6 @@
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/smp_lock.h>
18 17
19#include <asm/compat.h> 18#include <asm/compat.h>
20#include <asm/ccwdev.h> 19#include <asm/ccwdev.h>
@@ -520,6 +519,7 @@ static const struct file_operations fs3270_fops = {
520 .compat_ioctl = fs3270_ioctl, /* ioctl */ 519 .compat_ioctl = fs3270_ioctl, /* ioctl */
521 .open = fs3270_open, /* open */ 520 .open = fs3270_open, /* open */
522 .release = fs3270_close, /* release */ 521 .release = fs3270_close, /* release */
522 .llseek = no_llseek,
523}; 523};
524 524
525/* 525/*
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 8cd58e412b5e..806588192483 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -455,12 +455,11 @@ do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
455 return 0; 455 return 0;
456} 456}
457 457
458int 458int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
459kbd_ioctl(struct kbd_data *kbd, struct file *file,
460 unsigned int cmd, unsigned long arg)
461{ 459{
462 void __user *argp; 460 void __user *argp;
463 int ct, perm; 461 unsigned int ct;
462 int perm;
464 463
465 argp = (void __user *)arg; 464 argp = (void __user *)arg;
466 465
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index 5ccfe9cf126d..7e736aaeae6e 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -36,7 +36,7 @@ void kbd_free(struct kbd_data *);
36void kbd_ascebc(struct kbd_data *, unsigned char *); 36void kbd_ascebc(struct kbd_data *, unsigned char *);
37 37
38void kbd_keycode(struct kbd_data *, unsigned int); 38void kbd_keycode(struct kbd_data *, unsigned int);
39int kbd_ioctl(struct kbd_data *, struct file *, unsigned int, unsigned long); 39int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);
40 40
41/* 41/*
42 * Helper Functions. 42 * Helper Functions.
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index e021ec663ef9..5b8b8592d311 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -447,6 +447,7 @@ static const struct file_operations mon_fops = {
447 .release = &mon_close, 447 .release = &mon_close,
448 .read = &mon_read, 448 .read = &mon_read,
449 .poll = &mon_poll, 449 .poll = &mon_poll,
450 .llseek = noop_llseek,
450}; 451};
451 452
452static struct miscdevice mon_dev = { 453static struct miscdevice mon_dev = {
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 572a1e7fd099..4600aa10a1c6 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -97,7 +97,7 @@ static int monwrite_new_hdr(struct mon_private *monpriv)
97{ 97{
98 struct monwrite_hdr *monhdr = &monpriv->hdr; 98 struct monwrite_hdr *monhdr = &monpriv->hdr;
99 struct mon_buf *monbuf; 99 struct mon_buf *monbuf;
100 int rc; 100 int rc = 0;
101 101
102 if (monhdr->datalen > MONWRITE_MAX_DATALEN || 102 if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
103 monhdr->mon_function > MONWRITE_START_CONFIG || 103 monhdr->mon_function > MONWRITE_START_CONFIG ||
@@ -135,7 +135,7 @@ static int monwrite_new_hdr(struct mon_private *monpriv)
135 mon_buf_count++; 135 mon_buf_count++;
136 } 136 }
137 monpriv->current_buf = monbuf; 137 monpriv->current_buf = monbuf;
138 return 0; 138 return rc;
139} 139}
140 140
141static int monwrite_new_data(struct mon_private *monpriv) 141static int monwrite_new_data(struct mon_private *monpriv)
@@ -274,6 +274,7 @@ static const struct file_operations monwrite_fops = {
274 .open = &monwrite_open, 274 .open = &monwrite_open,
275 .release = &monwrite_close, 275 .release = &monwrite_close,
276 .write = &monwrite_write, 276 .write = &monwrite_write,
277 .llseek = noop_llseek,
277}; 278};
278 279
279static struct miscdevice mon_dev = { 280static struct miscdevice mon_dev = {
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 2a4c566456e7..810ac38631c3 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,6 +7,7 @@
7 * Copyright IBM Corp. 2003, 2009 7 * Copyright IBM Corp. 2003, 2009
8 */ 8 */
9 9
10#include <linux/kernel_stat.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/err.h> 12#include <linux/err.h>
12#include <linux/init.h> 13#include <linux/init.h>
@@ -329,6 +330,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
329 struct raw3270_request *rq; 330 struct raw3270_request *rq;
330 int rc; 331 int rc;
331 332
333 kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
332 rp = dev_get_drvdata(&cdev->dev); 334 rp = dev_get_drvdata(&cdev->dev);
333 if (!rp) 335 if (!rp)
334 return; 336 return;
@@ -596,13 +598,12 @@ __raw3270_size_device(struct raw3270 *rp)
596 static const unsigned char wbuf[] = 598 static const unsigned char wbuf[] =
597 { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 }; 599 { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
598 struct raw3270_ua *uap; 600 struct raw3270_ua *uap;
599 unsigned short count;
600 int rc; 601 int rc;
601 602
602 /* 603 /*
603 * To determine the size of the 3270 device we need to do: 604 * To determine the size of the 3270 device we need to do:
604 * 1) send a 'read partition' data stream to the device 605 * 1) send a 'read partition' data stream to the device
605 * 2) wait for the attn interrupt that preceeds the query reply 606 * 2) wait for the attn interrupt that precedes the query reply
606 * 3) do a read modified to get the query reply 607 * 3) do a read modified to get the query reply
607 * To make things worse we have to cope with intervention 608 * To make things worse we have to cope with intervention
608 * required (3270 device switched to 'stand-by') and command 609 * required (3270 device switched to 'stand-by') and command
@@ -651,7 +652,6 @@ __raw3270_size_device(struct raw3270 *rp)
651 if (rc) 652 if (rc)
652 return rc; 653 return rc;
653 /* Got a Query Reply */ 654 /* Got a Query Reply */
654 count = sizeof(rp->init_data) - rp->init_request.rescnt;
655 uap = (struct raw3270_ua *) (rp->init_data + 1); 655 uap = (struct raw3270_ua *) (rp->init_data + 1);
656 /* Paranoia check. */ 656 /* Paranoia check. */
657 if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81) 657 if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
@@ -1386,8 +1386,10 @@ static struct ccw_device_id raw3270_id[] = {
1386}; 1386};
1387 1387
1388static struct ccw_driver raw3270_ccw_driver = { 1388static struct ccw_driver raw3270_ccw_driver = {
1389 .name = "3270", 1389 .driver = {
1390 .owner = THIS_MODULE, 1390 .name = "3270",
1391 .owner = THIS_MODULE,
1392 },
1391 .ids = raw3270_id, 1393 .ids = raw3270_id,
1392 .probe = &raw3270_probe, 1394 .probe = &raw3270_probe,
1393 .remove = &raw3270_remove, 1395 .remove = &raw3270_remove,
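The raw3270_ccw_driver hunk shows another mechanical conversion that repeats below for tape_34xx and tape_3590: struct ccw_driver loses its own name/owner fields, which now live in the embedded generic struct device_driver. For a hypothetical driver the new shape is:

static struct ccw_driver foo_ccw_driver = {
        .driver = {
                .name  = "foo",         /* moved into the embedded device_driver */
                .owner = THIS_MODULE,
        },
        .ids    = foo_ids,
        .probe  = foo_probe,
        .remove = foo_remove,
};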
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f6d72e1f2a38..eaa7e78186f9 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -7,6 +7,7 @@
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/kernel_stat.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/err.h> 12#include <linux/err.h>
12#include <linux/spinlock.h> 13#include <linux/spinlock.h>
@@ -19,15 +20,12 @@
19#include <linux/completion.h> 20#include <linux/completion.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <asm/types.h> 22#include <asm/types.h>
22#include <asm/s390_ext.h> 23#include <asm/irq.h>
23 24
24#include "sclp.h" 25#include "sclp.h"
25 26
26#define SCLP_HEADER "sclp: " 27#define SCLP_HEADER "sclp: "
27 28
28/* Structure for register_early_external_interrupt. */
29static ext_int_info_t ext_int_info_hwc;
30
31/* Lock to protect internal data consistency. */ 29/* Lock to protect internal data consistency. */
32static DEFINE_SPINLOCK(sclp_lock); 30static DEFINE_SPINLOCK(sclp_lock);
33 31
@@ -395,16 +393,17 @@ __sclp_find_req(u32 sccb)
395/* Handler for external interruption. Perform request post-processing. 393/* Handler for external interruption. Perform request post-processing.
396 * Prepare read event data request if necessary. Start processing of next 394 * Prepare read event data request if necessary. Start processing of next
397 * request on queue. */ 395 * request on queue. */
398static void 396static void sclp_interrupt_handler(unsigned int ext_int_code,
399sclp_interrupt_handler(__u16 code) 397 unsigned int param32, unsigned long param64)
400{ 398{
401 struct sclp_req *req; 399 struct sclp_req *req;
402 u32 finished_sccb; 400 u32 finished_sccb;
403 u32 evbuf_pending; 401 u32 evbuf_pending;
404 402
403 kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
405 spin_lock(&sclp_lock); 404 spin_lock(&sclp_lock);
406 finished_sccb = S390_lowcore.ext_params & 0xfffffff8; 405 finished_sccb = param32 & 0xfffffff8;
407 evbuf_pending = S390_lowcore.ext_params & 0x3; 406 evbuf_pending = param32 & 0x3;
408 if (finished_sccb) { 407 if (finished_sccb) {
409 del_timer(&sclp_request_timer); 408 del_timer(&sclp_request_timer);
410 sclp_running_state = sclp_running_state_reset_pending; 409 sclp_running_state = sclp_running_state_reset_pending;
@@ -468,7 +467,7 @@ sclp_sync_wait(void)
468 cr0_sync &= 0xffff00a0; 467 cr0_sync &= 0xffff00a0;
469 cr0_sync |= 0x00000200; 468 cr0_sync |= 0x00000200;
470 __ctl_load(cr0_sync, 0, 0); 469 __ctl_load(cr0_sync, 0, 0);
471 __raw_local_irq_stosm(0x01); 470 __arch_local_irq_stosm(0x01);
472 /* Loop until driver state indicates finished request */ 471 /* Loop until driver state indicates finished request */
473 while (sclp_running_state != sclp_running_state_idle) { 472 while (sclp_running_state != sclp_running_state_idle) {
474 /* Check for expired request timer */ 473 /* Check for expired request timer */
@@ -819,12 +818,13 @@ EXPORT_SYMBOL(sclp_reactivate);
819 818
820/* Handler for external interruption used during initialization. Modify 819/* Handler for external interruption used during initialization. Modify
821 * request state to done. */ 820 * request state to done. */
822static void 821static void sclp_check_handler(unsigned int ext_int_code,
823sclp_check_handler(__u16 code) 822 unsigned int param32, unsigned long param64)
824{ 823{
825 u32 finished_sccb; 824 u32 finished_sccb;
826 825
827 finished_sccb = S390_lowcore.ext_params & 0xfffffff8; 826 kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
827 finished_sccb = param32 & 0xfffffff8;
828 /* Is this the interrupt we are waiting for? */ 828 /* Is this the interrupt we are waiting for? */
829 if (finished_sccb == 0) 829 if (finished_sccb == 0)
830 return; 830 return;
@@ -866,8 +866,7 @@ sclp_check_interface(void)
866 866
867 spin_lock_irqsave(&sclp_lock, flags); 867 spin_lock_irqsave(&sclp_lock, flags);
868 /* Prepare init mask command */ 868 /* Prepare init mask command */
869 rc = register_early_external_interrupt(0x2401, sclp_check_handler, 869 rc = register_external_interrupt(0x2401, sclp_check_handler);
870 &ext_int_info_hwc);
871 if (rc) { 870 if (rc) {
872 spin_unlock_irqrestore(&sclp_lock, flags); 871 spin_unlock_irqrestore(&sclp_lock, flags);
873 return rc; 872 return rc;
@@ -885,12 +884,12 @@ sclp_check_interface(void)
885 spin_unlock_irqrestore(&sclp_lock, flags); 884 spin_unlock_irqrestore(&sclp_lock, flags);
886 /* Enable service-signal interruption - needs to happen 885 /* Enable service-signal interruption - needs to happen
887 * with IRQs enabled. */ 886 * with IRQs enabled. */
888 ctl_set_bit(0, 9); 887 service_subclass_irq_register();
889 /* Wait for signal from interrupt or timeout */ 888 /* Wait for signal from interrupt or timeout */
890 sclp_sync_wait(); 889 sclp_sync_wait();
891 /* Disable service-signal interruption - needs to happen 890 /* Disable service-signal interruption - needs to happen
892 * with IRQs enabled. */ 891 * with IRQs enabled. */
893 ctl_clear_bit(0,9); 892 service_subclass_irq_unregister();
894 spin_lock_irqsave(&sclp_lock, flags); 893 spin_lock_irqsave(&sclp_lock, flags);
895 del_timer(&sclp_request_timer); 894 del_timer(&sclp_request_timer);
896 if (sclp_init_req.status == SCLP_REQ_DONE && 895 if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -900,8 +899,7 @@ sclp_check_interface(void)
900 } else 899 } else
901 rc = -EBUSY; 900 rc = -EBUSY;
902 } 901 }
903 unregister_early_external_interrupt(0x2401, sclp_check_handler, 902 unregister_external_interrupt(0x2401, sclp_check_handler);
904 &ext_int_info_hwc);
905 spin_unlock_irqrestore(&sclp_lock, flags); 903 spin_unlock_irqrestore(&sclp_lock, flags);
906 return rc; 904 return rc;
907} 905}
@@ -1064,15 +1062,14 @@ sclp_init(void)
1064 if (rc) 1062 if (rc)
1065 goto fail_init_state_uninitialized; 1063 goto fail_init_state_uninitialized;
1066 /* Register interrupt handler */ 1064 /* Register interrupt handler */
1067 rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler, 1065 rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
1068 &ext_int_info_hwc);
1069 if (rc) 1066 if (rc)
1070 goto fail_unregister_reboot_notifier; 1067 goto fail_unregister_reboot_notifier;
1071 sclp_init_state = sclp_init_state_initialized; 1068 sclp_init_state = sclp_init_state_initialized;
1072 spin_unlock_irqrestore(&sclp_lock, flags); 1069 spin_unlock_irqrestore(&sclp_lock, flags);
1073 /* Enable service-signal external interruption - needs to happen with 1070 /* Enable service-signal external interruption - needs to happen with
1074 * IRQs enabled. */ 1071 * IRQs enabled. */
1075 ctl_set_bit(0, 9); 1072 service_subclass_irq_register();
1076 sclp_init_mask(1); 1073 sclp_init_mask(1);
1077 return 0; 1074 return 0;
1078 1075
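Taken together, the sclp.c hunks convert the driver to the reworked s390 external-interrupt API: register_early_external_interrupt()/unregister_early_external_interrupt() with their ext_int_info_t bookkeeping become plain register_external_interrupt()/unregister_external_interrupt(), the handler prototype grows the interrupt parameters as arguments (so S390_lowcore.ext_params is no longer read directly), and the raw ctl_set_bit(0, 9)/ctl_clear_bit(0, 9) toggling of the service-signal subclass is replaced by the service_subclass_irq_register()/_unregister() helpers. A condensed sketch of the new handler shape, using a hypothetical name:

static void foo_ext_handler(unsigned int ext_int_code,
                            unsigned int param32, unsigned long param64)
{
        u32 sccb;

        kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
        sccb = param32 & 0xfffffff8;    /* SCCB address now arrives in param32 */
        /* ... request post-processing ... */
}

/* registration, e.g. in an init path:
 *      rc = register_external_interrupt(0x2401, foo_ext_handler);
 *      service_subclass_irq_register();
 */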
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6bb5a6bdfab5..49a1bb52bc87 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -28,6 +28,7 @@
28#define EVTYP_CONFMGMDATA 0x04 28#define EVTYP_CONFMGMDATA 0x04
29#define EVTYP_SDIAS 0x1C 29#define EVTYP_SDIAS 0x1C
30#define EVTYP_ASYNC 0x0A 30#define EVTYP_ASYNC 0x0A
31#define EVTYP_OCF 0x1E
31 32
32#define EVTYP_OPCMD_MASK 0x80000000 33#define EVTYP_OPCMD_MASK 0x80000000
33#define EVTYP_MSG_MASK 0x40000000 34#define EVTYP_MSG_MASK 0x40000000
@@ -40,6 +41,7 @@
40#define EVTYP_CONFMGMDATA_MASK 0x10000000 41#define EVTYP_CONFMGMDATA_MASK 0x10000000
41#define EVTYP_SDIAS_MASK 0x00000010 42#define EVTYP_SDIAS_MASK 0x00000010
42#define EVTYP_ASYNC_MASK 0x00400000 43#define EVTYP_ASYNC_MASK 0x00400000
44#define EVTYP_OCF_MASK 0x00000004
43 45
44#define GNRLMSGFLGS_DOM 0x8000 46#define GNRLMSGFLGS_DOM 0x8000
45#define GNRLMSGFLGS_SNDALRM 0x4000 47#define GNRLMSGFLGS_SNDALRM 0x4000
@@ -186,4 +188,26 @@ sclp_ascebc_str(unsigned char *str, int nr)
186 (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr); 188 (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
187} 189}
188 190
191static inline struct gds_vector *
192sclp_find_gds_vector(void *start, void *end, u16 id)
193{
194 struct gds_vector *v;
195
196 for (v = start; (void *) v < end; v = (void *) v + v->length)
197 if (v->gds_id == id)
198 return v;
199 return NULL;
200}
201
202static inline struct gds_subvector *
203sclp_find_gds_subvector(void *start, void *end, u8 key)
204{
205 struct gds_subvector *sv;
206
207 for (sv = start; (void *) sv < end; sv = (void *) sv + sv->length)
208 if (sv->key == key)
209 return sv;
210 return NULL;
211}
212
189#endif /* __SCLP_H__ */ 213#endif /* __SCLP_H__ */
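The two new inline helpers centralize the GDS (general data stream) walk that sclp_tty.c used to open-code and that the new sclp_ocf.c also needs: vectors and subvectors are variable-length records, so the scan steps forward by each element's length field until the id or key matches or the end of the buffer is reached. Usage, sketched after the callers below (evbuf is an SCLP event buffer):

struct gds_vector *v;

/* find the MDS message unit vector inside the event buffer */
v = sclp_find_gds_vector(evbuf + 1,                      /* first vector */
                         (void *) evbuf + evbuf->length, /* buffer end   */
                         GDS_ID_MDSMU);
if (v) {
        /* v->length spans the vector header plus its payload */
}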
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 4b60ede07f0e..be55fb2b1b1c 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
518 return; 518 return;
519 new_incr->rn = rn; 519 new_incr->rn = rn;
520 new_incr->standby = standby; 520 new_incr->standby = standby;
521 if (!standby)
522 new_incr->usecount = 1;
521 last_rn = 0; 523 last_rn = 0;
522 prev = &sclp_mem_list; 524 prev = &sclp_mem_list;
523 list_for_each_entry(incr, &sclp_mem_list, list) { 525 list_for_each_entry(incr, &sclp_mem_list, list) {
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index b497afe061cc..95b909ac2b73 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -33,6 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
33 int cpu; 33 int cpu;
34 struct sys_device *sysdev; 34 struct sys_device *sysdev;
35 35
36 s390_adjust_jiffies();
36 pr_warning("cpu capability changed.\n"); 37 pr_warning("cpu capability changed.\n");
37 get_online_cpus(); 38 get_online_cpus();
38 for_each_online_cpu(cpu) { 39 for_each_online_cpu(cpu) {
@@ -70,21 +71,9 @@ static struct sclp_register sclp_conf_register =
70 71
71static int __init sclp_conf_init(void) 72static int __init sclp_conf_init(void)
72{ 73{
73 int rc;
74
75 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); 74 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
76 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); 75 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
77 76 return sclp_register(&sclp_conf_register);
78 rc = sclp_register(&sclp_conf_register);
79 if (rc)
80 return rc;
81
82 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
83 pr_warning("no configuration management.\n");
84 sclp_unregister(&sclp_conf_register);
85 rc = -ENOSYS;
86 }
87 return rc;
88} 77}
89 78
90__initcall(sclp_conf_init); 79__initcall(sclp_conf_init);
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
new file mode 100644
index 000000000000..ab294d5a534e
--- /dev/null
+++ b/drivers/s390/char/sclp_ocf.c
@@ -0,0 +1,145 @@
1/*
2 * drivers/s390/char/sclp_ocf.c
3 * SCLP OCF communication parameters sysfs interface
4 *
5 * Copyright IBM Corp. 2011
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#define KMSG_COMPONENT "sclp_ocf"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/stat.h>
15#include <linux/device.h>
16#include <linux/string.h>
17#include <linux/ctype.h>
18#include <linux/kmod.h>
19#include <linux/timer.h>
20#include <linux/err.h>
21#include <asm/ebcdic.h>
22#include <asm/sclp.h>
23
24#include "sclp.h"
25
26#define OCF_LENGTH_HMC_NETWORK 8UL
27#define OCF_LENGTH_CPC_NAME 8UL
28
29static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
30static char cpc_name[OCF_LENGTH_CPC_NAME + 1];
31
32static DEFINE_SPINLOCK(sclp_ocf_lock);
33static struct work_struct sclp_ocf_change_work;
34
35static struct kset *ocf_kset;
36
37static void sclp_ocf_change_notify(struct work_struct *work)
38{
39 kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
40}
41
42/* Handler for OCF event. Look for the CPC image name. */
43static void sclp_ocf_handler(struct evbuf_header *evbuf)
44{
45 struct gds_vector *v;
46 struct gds_subvector *sv, *netid, *cpc;
47 size_t size;
48
49 /* Find the 0x9f00 block. */
50 v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
51 0x9f00);
52 if (!v)
53 return;
54 /* Find the 0x9f22 block inside the 0x9f00 block. */
55 v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
56 if (!v)
57 return;
58 /* Find the 0x81 block inside the 0x9f22 block. */
59 sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
60 if (!sv)
61 return;
62 /* Find the 0x01 block inside the 0x81 block. */
63 netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
64 /* Find the 0x02 block inside the 0x81 block. */
65 cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
66 /* Copy network name and cpc name. */
67 spin_lock(&sclp_ocf_lock);
68 if (netid) {
69 size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
70 memcpy(hmc_network, netid + 1, size);
71 EBCASC(hmc_network, size);
72 hmc_network[size] = 0;
73 }
74 if (cpc) {
75 size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
76 memcpy(cpc_name, cpc + 1, size);
77 EBCASC(cpc_name, size);
78 cpc_name[size] = 0;
79 }
80 spin_unlock(&sclp_ocf_lock);
81 schedule_work(&sclp_ocf_change_work);
82}
83
84static struct sclp_register sclp_ocf_event = {
85 .receive_mask = EVTYP_OCF_MASK,
86 .receiver_fn = sclp_ocf_handler,
87};
88
89static ssize_t cpc_name_show(struct kobject *kobj,
90 struct kobj_attribute *attr, char *page)
91{
92 int rc;
93
94 spin_lock_irq(&sclp_ocf_lock);
95 rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name);
96 spin_unlock_irq(&sclp_ocf_lock);
97 return rc;
98}
99
100static struct kobj_attribute cpc_name_attr =
101 __ATTR(cpc_name, 0444, cpc_name_show, NULL);
102
103static ssize_t hmc_network_show(struct kobject *kobj,
104 struct kobj_attribute *attr, char *page)
105{
106 int rc;
107
108 spin_lock_irq(&sclp_ocf_lock);
109 rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
110 spin_unlock_irq(&sclp_ocf_lock);
111 return rc;
112}
113
114static struct kobj_attribute hmc_network_attr =
115 __ATTR(hmc_network, 0444, hmc_network_show, NULL);
116
117static struct attribute *ocf_attrs[] = {
118 &cpc_name_attr.attr,
119 &hmc_network_attr.attr,
120 NULL,
121};
122
123static struct attribute_group ocf_attr_group = {
124 .attrs = ocf_attrs,
125};
126
127static int __init ocf_init(void)
128{
129 int rc;
130
131 INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
132 ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
133 if (!ocf_kset)
134 return -ENOMEM;
135
136 rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
137 if (rc) {
138 kset_unregister(ocf_kset);
139 return rc;
140 }
141
142 return sclp_register(&sclp_ocf_event);
143}
144
145device_initcall(ocf_init);
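Because the kset is created under firmware_kobj, the two read-only attributes appear as /sys/firmware/ocf/cpc_name and /sys/firmware/ocf/hmc_network, and the scheduled work emits a KOBJ_CHANGE uevent whenever an OCF event refreshes them. A minimal userspace sketch for consuming one of the attributes:

#include <stdio.h>

int main(void)
{
        char name[16];
        FILE *f = fopen("/sys/firmware/ocf/cpc_name", "r");

        if (!f)
                return 1;
        if (fgets(name, sizeof(name), f))
                printf("CPC name: %s", name);   /* value already ends in '\n' */
        fclose(f);
        return 0;
}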
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 6a1c58dc61a7..fa733ecd3d70 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -69,9 +69,6 @@ static DEFINE_MUTEX(sdias_mutex);
69 69
70static void sdias_callback(struct sclp_req *request, void *data) 70static void sdias_callback(struct sclp_req *request, void *data)
71{ 71{
72 struct sdias_sccb *cbsccb;
73
74 cbsccb = (struct sdias_sccb *) request->sccb;
75 sclp_req_done = 1; 72 sclp_req_done = 1;
76 wake_up(&sdias_wq); /* Inform caller, that request is complete */ 73 wake_up(&sdias_wq); /* Inform caller, that request is complete */
77 TRACE("callback done\n"); 74 TRACE("callback done\n");
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 8258d590505f..a879c139926a 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -408,118 +408,72 @@ static int sclp_switch_cases(unsigned char *buf, int count)
408 return op - buf; 408 return op - buf;
409} 409}
410 410
411static void 411static void sclp_get_input(struct gds_subvector *sv)
412sclp_get_input(unsigned char *start, unsigned char *end)
413{ 412{
413 unsigned char *str;
414 int count; 414 int count;
415 415
416 count = end - start; 416 str = (unsigned char *) (sv + 1);
417 count = sv->length - sizeof(*sv);
417 if (sclp_tty_tolower) 418 if (sclp_tty_tolower)
418 EBC_TOLOWER(start, count); 419 EBC_TOLOWER(str, count);
419 count = sclp_switch_cases(start, count); 420 count = sclp_switch_cases(str, count);
420 /* convert EBCDIC to ASCII (modify original input in SCCB) */ 421 /* convert EBCDIC to ASCII (modify original input in SCCB) */
421 sclp_ebcasc_str(start, count); 422 sclp_ebcasc_str(str, count);
422 423
423 /* transfer input to high level driver */ 424 /* transfer input to high level driver */
424 sclp_tty_input(start, count); 425 sclp_tty_input(str, count);
425}
426
427static inline struct gds_vector *
428find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id)
429{
430 struct gds_vector *vec;
431
432 for (vec = start; vec < end; vec = (void *) vec + vec->length)
433 if (vec->gds_id == id)
434 return vec;
435 return NULL;
436} 426}
437 427
438static inline struct gds_subvector * 428static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv)
439find_gds_subvector(struct gds_subvector *start,
440 struct gds_subvector *end, u8 key)
441{ 429{
442 struct gds_subvector *subvec; 430 void *end;
443 431
444 for (subvec = start; subvec < end; 432 end = (void *) sv + sv->length;
445 subvec = (void *) subvec + subvec->length) 433 for (sv = sv + 1; (void *) sv < end; sv = (void *) sv + sv->length)
446 if (subvec->key == key) 434 if (sv->key == 0x30)
447 return subvec; 435 sclp_get_input(sv);
448 return NULL;
449} 436}
450 437
451static inline void 438static inline void sclp_eval_textcmd(struct gds_vector *v)
452sclp_eval_selfdeftextmsg(struct gds_subvector *start,
453 struct gds_subvector *end)
454{ 439{
455 struct gds_subvector *subvec; 440 struct gds_subvector *sv;
456 441 void *end;
457 subvec = start;
458 while (subvec < end) {
459 subvec = find_gds_subvector(subvec, end, 0x30);
460 if (!subvec)
461 break;
462 sclp_get_input((unsigned char *)(subvec + 1),
463 (unsigned char *) subvec + subvec->length);
464 subvec = (void *) subvec + subvec->length;
465 }
466}
467 442
468static inline void 443 end = (void *) v + v->length;
469sclp_eval_textcmd(struct gds_subvector *start, 444 for (sv = (struct gds_subvector *) (v + 1);
470 struct gds_subvector *end) 445 (void *) sv < end; sv = (void *) sv + sv->length)
471{ 446 if (sv->key == GDS_KEY_SELFDEFTEXTMSG)
472 struct gds_subvector *subvec; 447 sclp_eval_selfdeftextmsg(sv);
473 448
474 subvec = start;
475 while (subvec < end) {
476 subvec = find_gds_subvector(subvec, end,
477 GDS_KEY_SELFDEFTEXTMSG);
478 if (!subvec)
479 break;
480 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
481 (void *)subvec + subvec->length);
482 subvec = (void *) subvec + subvec->length;
483 }
484} 449}
485 450
486static inline void 451static inline void sclp_eval_cpmsu(struct gds_vector *v)
487sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
488{ 452{
489 struct gds_vector *vec; 453 void *end;
490 454
491 vec = start; 455 end = (void *) v + v->length;
492 while (vec < end) { 456 for (v = v + 1; (void *) v < end; v = (void *) v + v->length)
493 vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD); 457 if (v->gds_id == GDS_ID_TEXTCMD)
494 if (!vec) 458 sclp_eval_textcmd(v);
495 break;
496 sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
497 (void *) vec + vec->length);
498 vec = (void *) vec + vec->length;
499 }
500} 459}
501 460
502 461
503static inline void 462static inline void sclp_eval_mdsmu(struct gds_vector *v)
504sclp_eval_mdsmu(struct gds_vector *start, void *end)
505{ 463{
506 struct gds_vector *vec; 464 v = sclp_find_gds_vector(v + 1, (void *) v + v->length, GDS_ID_CPMSU);
507 465 if (v)
508 vec = find_gds_vector(start, end, GDS_ID_CPMSU); 466 sclp_eval_cpmsu(v);
509 if (vec)
510 sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length);
511} 467}
512 468
513static void 469static void sclp_tty_receiver(struct evbuf_header *evbuf)
514sclp_tty_receiver(struct evbuf_header *evbuf)
515{ 470{
516 struct gds_vector *start, *end, *vec; 471 struct gds_vector *v;
517 472
518 start = (struct gds_vector *)(evbuf + 1); 473 v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
519 end = (void *) evbuf + evbuf->length; 474 GDS_ID_MDSMU);
520 vec = find_gds_vector(start, end, GDS_ID_MDSMU); 475 if (v)
521 if (vec) 476 sclp_eval_mdsmu(v);
522 sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length);
523} 477}
524 478
525static void 479static void
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 7a242f073632..267b54e8ff5a 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request)
280 return rc; 280 return rc;
281} 281}
282 282
283static inline void
284tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
285{
286 request->callback = (void *) tape_free_request;
287 request->callback_data = NULL;
288 tape_do_io_async(device, request);
289}
290
283extern int tape_oper_handler(int irq, int status); 291extern int tape_oper_handler(int irq, int status);
284extern void tape_noper_handler(int irq, int status); 292extern void tape_noper_handler(int irq, int status);
285extern int tape_open(struct tape_device *); 293extern int tape_open(struct tape_device *);
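The new tape_do_io_async_free() wraps the fire-and-forget pattern the asynchronous tape commands below rely on: the request's completion callback is pointed at tape_free_request(), so the request frees itself when the I/O finishes and the submitting context never blocks. A sketch of a caller, with a hypothetical choice of command:

static void foo_async_nop(struct tape_device *device)
{
        struct tape_request *request;

        request = tape_alloc_request(1, 32);
        if (IS_ERR(request))
                return;                 /* nobody waits, nothing to report */
        request->op = TO_NOP;           /* hypothetical operation */
        tape_ccw_end(request->cpaddr, NOP, 0, NULL);
        tape_do_io_async_free(device, request); /* freed by its own callback */
}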
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index c17f35b6136a..9eff2df70ddb 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int);
53 * Medium sense for 34xx tapes. There is no 'real' medium sense call. 53 * Medium sense for 34xx tapes. There is no 'real' medium sense call.
54 * So we just do a normal sense. 54 * So we just do a normal sense.
55 */ 55 */
56static int 56static void __tape_34xx_medium_sense(struct tape_request *request)
57tape_34xx_medium_sense(struct tape_device *device)
58{ 57{
59 struct tape_request *request; 58 struct tape_device *device = request->device;
60 unsigned char *sense; 59 unsigned char *sense;
61 int rc;
62
63 request = tape_alloc_request(1, 32);
64 if (IS_ERR(request)) {
65 DBF_EXCEPTION(6, "MSEN fail\n");
66 return PTR_ERR(request);
67 }
68
69 request->op = TO_MSEN;
70 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
71 60
72 rc = tape_do_io_interruptible(device, request);
73 if (request->rc == 0) { 61 if (request->rc == 0) {
74 sense = request->cpdata; 62 sense = request->cpdata;
75 63
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device)
88 device->tape_generic_status |= GMT_WR_PROT(~0); 76 device->tape_generic_status |= GMT_WR_PROT(~0);
89 else 77 else
90 device->tape_generic_status &= ~GMT_WR_PROT(~0); 78 device->tape_generic_status &= ~GMT_WR_PROT(~0);
91 } else { 79 } else
92 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", 80 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
93 request->rc); 81 request->rc);
94 }
95 tape_free_request(request); 82 tape_free_request(request);
83}
96 84
85static int tape_34xx_medium_sense(struct tape_device *device)
86{
87 struct tape_request *request;
88 int rc;
89
90 request = tape_alloc_request(1, 32);
91 if (IS_ERR(request)) {
92 DBF_EXCEPTION(6, "MSEN fail\n");
93 return PTR_ERR(request);
94 }
95
96 request->op = TO_MSEN;
97 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
98 rc = tape_do_io_interruptible(device, request);
99 __tape_34xx_medium_sense(request);
97 return rc; 100 return rc;
98} 101}
99 102
103static void tape_34xx_medium_sense_async(struct tape_device *device)
104{
105 struct tape_request *request;
106
107 request = tape_alloc_request(1, 32);
108 if (IS_ERR(request)) {
109 DBF_EXCEPTION(6, "MSEN fail\n");
110 return;
111 }
112
113 request->op = TO_MSEN;
114 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
115 request->callback = (void *) __tape_34xx_medium_sense;
116 request->callback_data = NULL;
117 tape_do_io_async(device, request);
118}
119
100struct tape_34xx_work { 120struct tape_34xx_work {
101 struct tape_device *device; 121 struct tape_device *device;
102 enum tape_op op; 122 enum tape_op op;
@@ -109,6 +129,9 @@ struct tape_34xx_work {
109 * is inserted but cannot call tape_do_io* from an interrupt context. 129 * is inserted but cannot call tape_do_io* from an interrupt context.
110 * Maybe that's useful for other actions we want to start from the 130 * Maybe that's useful for other actions we want to start from the
111 * interrupt handler. 131 * interrupt handler.
132 * Note: the work handler is called by the system work queue. The tape
133 * commands started by the handler need to be asynchrounous, otherwise
134 * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
112 */ 135 */
113static void 136static void
114tape_34xx_work_handler(struct work_struct *work) 137tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work)
119 142
120 switch(p->op) { 143 switch(p->op) {
121 case TO_MSEN: 144 case TO_MSEN:
122 tape_34xx_medium_sense(device); 145 tape_34xx_medium_sense_async(device);
123 break; 146 break;
124 default: 147 default:
125 DBF_EVENT(3, "T34XX: internal error: unknown work\n"); 148 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
@@ -1297,8 +1320,10 @@ tape_34xx_online(struct ccw_device *cdev)
1297} 1320}
1298 1321
1299static struct ccw_driver tape_34xx_driver = { 1322static struct ccw_driver tape_34xx_driver = {
1300 .name = "tape_34xx", 1323 .driver = {
1301 .owner = THIS_MODULE, 1324 .name = "tape_34xx",
1325 .owner = THIS_MODULE,
1326 },
1302 .ids = tape_34xx_ids, 1327 .ids = tape_34xx_ids,
1303 .probe = tape_generic_probe, 1328 .probe = tape_generic_probe,
1304 .remove = tape_generic_remove, 1329 .remove = tape_generic_remove,
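The tape_34xx rework is the template for the 3590 changes below: channel-program setup and result handling are split out so the synchronous path keeps tape_do_io_interruptible() while the work handler gets an _async variant that never sleeps on the I/O. The note in the hunk explains why: the work handler runs on the system workqueue, and a synchronous tape command issued there can deadlock when completing that command itself needs workqueue progress (the deferred cc=1 case handled in __tape_do_irq). Reduced to a skeleton with hypothetical names (foo_build_sense stands in for the request setup):

static void __foo_sense_done(struct tape_request *request)
{
        /* shared result handling; request->device is still valid here */
        tape_free_request(request);
}

static int foo_sense(struct tape_device *device)        /* may sleep */
{
        struct tape_request *request = foo_build_sense(device);
        int rc;

        if (IS_ERR(request))
                return PTR_ERR(request);
        rc = tape_do_io_interruptible(device, request); /* waits */
        __foo_sense_done(request);
        return rc;
}

static void foo_sense_async(struct tape_device *device) /* work/irq safe */
{
        struct tape_request *request = foo_build_sense(device);

        if (IS_ERR(request))
                return;
        request->callback = (void *) __foo_sense_done;  /* runs on completion */
        request->callback_data = NULL;
        tape_do_io_async(device, request);              /* returns at once */
}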
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index fc993acf99b6..a7d570728882 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -24,6 +24,8 @@
24#include "tape_std.h" 24#include "tape_std.h"
25#include "tape_3590.h" 25#include "tape_3590.h"
26 26
27static struct workqueue_struct *tape_3590_wq;
28
27/* 29/*
28 * Pointer to debug area. 30 * Pointer to debug area.
29 */ 31 */
@@ -31,7 +33,7 @@ debug_info_t *TAPE_DBF_AREA = NULL;
31EXPORT_SYMBOL(TAPE_DBF_AREA); 33EXPORT_SYMBOL(TAPE_DBF_AREA);
32 34
33/******************************************************************* 35/*******************************************************************
34 * Error Recovery fuctions: 36 * Error Recovery functions:
35 * - Read Opposite: implemented 37 * - Read Opposite: implemented
36 * - Read Device (buffered) log: BRA 38 * - Read Device (buffered) log: BRA
37 * - Read Library log: BRA 39 * - Read Library log: BRA
@@ -327,17 +329,17 @@ out:
327/* 329/*
328 * Enable encryption 330 * Enable encryption
329 */ 331 */
330static int tape_3592_enable_crypt(struct tape_device *device) 332static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
331{ 333{
332 struct tape_request *request; 334 struct tape_request *request;
333 char *data; 335 char *data;
334 336
335 DBF_EVENT(6, "tape_3592_enable_crypt\n"); 337 DBF_EVENT(6, "tape_3592_enable_crypt\n");
336 if (!crypt_supported(device)) 338 if (!crypt_supported(device))
337 return -ENOSYS; 339 return ERR_PTR(-ENOSYS);
338 request = tape_alloc_request(2, 72); 340 request = tape_alloc_request(2, 72);
339 if (IS_ERR(request)) 341 if (IS_ERR(request))
340 return PTR_ERR(request); 342 return request;
341 data = request->cpdata; 343 data = request->cpdata;
342 memset(data,0,72); 344 memset(data,0,72);
343 345
@@ -352,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device)
352 request->op = TO_CRYPT_ON; 354 request->op = TO_CRYPT_ON;
353 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 355 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
354 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 356 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
357 return request;
358}
359
360static int tape_3592_enable_crypt(struct tape_device *device)
361{
362 struct tape_request *request;
363
364 request = __tape_3592_enable_crypt(device);
365 if (IS_ERR(request))
366 return PTR_ERR(request);
355 return tape_do_io_free(device, request); 367 return tape_do_io_free(device, request);
356} 368}
357 369
370static void tape_3592_enable_crypt_async(struct tape_device *device)
371{
372 struct tape_request *request;
373
374 request = __tape_3592_enable_crypt(device);
375 if (!IS_ERR(request))
376 tape_do_io_async_free(device, request);
377}
378
358/* 379/*
359 * Disable encryption 380 * Disable encryption
360 */ 381 */
361static int tape_3592_disable_crypt(struct tape_device *device) 382static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
362{ 383{
363 struct tape_request *request; 384 struct tape_request *request;
364 char *data; 385 char *data;
365 386
366 DBF_EVENT(6, "tape_3592_disable_crypt\n"); 387 DBF_EVENT(6, "tape_3592_disable_crypt\n");
367 if (!crypt_supported(device)) 388 if (!crypt_supported(device))
368 return -ENOSYS; 389 return ERR_PTR(-ENOSYS);
369 request = tape_alloc_request(2, 72); 390 request = tape_alloc_request(2, 72);
370 if (IS_ERR(request)) 391 if (IS_ERR(request))
371 return PTR_ERR(request); 392 return request;
372 data = request->cpdata; 393 data = request->cpdata;
373 memset(data,0,72); 394 memset(data,0,72);
374 395
@@ -381,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device)
381 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 402 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
382 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 403 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
383 404
405 return request;
406}
407
408static int tape_3592_disable_crypt(struct tape_device *device)
409{
410 struct tape_request *request;
411
412 request = __tape_3592_disable_crypt(device);
413 if (IS_ERR(request))
414 return PTR_ERR(request);
384 return tape_do_io_free(device, request); 415 return tape_do_io_free(device, request);
385} 416}
386 417
418static void tape_3592_disable_crypt_async(struct tape_device *device)
419{
420 struct tape_request *request;
421
422 request = __tape_3592_disable_crypt(device);
423 if (!IS_ERR(request))
424 tape_do_io_async_free(device, request);
425}
426
387/* 427/*
388 * IOCTL: Set encryption status 428 * IOCTL: Set encryption status
389 */ 429 */
@@ -455,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
455/* 495/*
456 * SENSE Medium: Get Sense data about medium state 496 * SENSE Medium: Get Sense data about medium state
457 */ 497 */
458static int 498static int tape_3590_sense_medium(struct tape_device *device)
459tape_3590_sense_medium(struct tape_device *device)
460{ 499{
461 struct tape_request *request; 500 struct tape_request *request;
462 501
@@ -468,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device)
468 return tape_do_io_free(device, request); 507 return tape_do_io_free(device, request);
469} 508}
470 509
510static void tape_3590_sense_medium_async(struct tape_device *device)
511{
512 struct tape_request *request;
513
514 request = tape_alloc_request(1, 128);
515 if (IS_ERR(request))
516 return;
517 request->op = TO_MSEN;
518 tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
519 tape_do_io_async_free(device, request);
520}
521
471/* 522/*
472 * MTTELL: Tell block. Return the number of block relative to current file. 523 * MTTELL: Tell block. Return the number of block relative to current file.
473 */ 524 */
@@ -544,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device,
544 * 2. The attention msg is written to the "read subsystem data" buffer. 595 * 2. The attention msg is written to the "read subsystem data" buffer.
545 * In this case we probably should print it to the console. 596 * In this case we probably should print it to the console.
546 */ 597 */
547static int 598static void tape_3590_read_attmsg_async(struct tape_device *device)
548tape_3590_read_attmsg(struct tape_device *device)
549{ 599{
550 struct tape_request *request; 600 struct tape_request *request;
551 char *buf; 601 char *buf;
552 602
553 request = tape_alloc_request(3, 4096); 603 request = tape_alloc_request(3, 4096);
554 if (IS_ERR(request)) 604 if (IS_ERR(request))
555 return PTR_ERR(request); 605 return;
556 request->op = TO_READ_ATTMSG; 606 request->op = TO_READ_ATTMSG;
557 buf = request->cpdata; 607 buf = request->cpdata;
558 buf[0] = PREP_RD_SS_DATA; 608 buf[0] = PREP_RD_SS_DATA;
@@ -560,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device)
560 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); 610 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
561 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); 611 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
562 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); 612 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
563 return tape_do_io_free(device, request); 613 tape_do_io_async_free(device, request);
564} 614}
565 615
566/* 616/*
567 * These functions are used to schedule follow-up actions from within an 617 * These functions are used to schedule follow-up actions from within an
568 * interrupt context (like unsolicited interrupts). 618 * interrupt context (like unsolicited interrupts).
619 * Note: the work handler is called by the system work queue. The tape
 620 * commands started by the handler need to be asynchronous, otherwise
621 * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
569 */ 622 */
570struct work_handler_data { 623struct work_handler_data {
571 struct tape_device *device; 624 struct tape_device *device;
@@ -581,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work)
581 634
582 switch (p->op) { 635 switch (p->op) {
583 case TO_MSEN: 636 case TO_MSEN:
584 tape_3590_sense_medium(p->device); 637 tape_3590_sense_medium_async(p->device);
585 break; 638 break;
586 case TO_READ_ATTMSG: 639 case TO_READ_ATTMSG:
587 tape_3590_read_attmsg(p->device); 640 tape_3590_read_attmsg_async(p->device);
588 break; 641 break;
589 case TO_CRYPT_ON: 642 case TO_CRYPT_ON:
590 tape_3592_enable_crypt(p->device); 643 tape_3592_enable_crypt_async(p->device);
591 break; 644 break;
592 case TO_CRYPT_OFF: 645 case TO_CRYPT_OFF:
593 tape_3592_disable_crypt(p->device); 646 tape_3592_disable_crypt_async(p->device);
594 break; 647 break;
595 default: 648 default:
596 DBF_EVENT(3, "T3590: work handler undefined for " 649 DBF_EVENT(3, "T3590: work handler undefined for "
@@ -613,7 +666,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
613 p->device = tape_get_device(device); 666 p->device = tape_get_device(device);
614 p->op = op; 667 p->op = op;
615 668
616 schedule_work(&p->work); 669 queue_work(tape_3590_wq, &p->work);
617 return 0; 670 return 0;
618} 671}
619 672
@@ -743,10 +796,8 @@ static void tape_3590_med_state_set(struct tape_device *device,
743static int 796static int
744tape_3590_done(struct tape_device *device, struct tape_request *request) 797tape_3590_done(struct tape_device *device, struct tape_request *request)
745{ 798{
746 struct tape_3590_disc_data *disc_data;
747 799
748 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 800 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
749 disc_data = device->discdata;
750 801
751 switch (request->op) { 802 switch (request->op) {
752 case TO_BSB: 803 case TO_BSB:
@@ -798,7 +849,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
798} 849}
799 850
800/* 851/*
801 * This fuction is called, when error recovery was successfull 852 * This function is called when error recovery was successful
802 */ 853 */
803static inline int 854static inline int
804tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) 855tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
@@ -809,7 +860,7 @@ tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
809} 860}
810 861
811/* 862/*
812 * This fuction is called, when error recovery was not successfull 863 * This function is called when error recovery was not successful
813 */ 864 */
814static inline int 865static inline int
815tape_3590_erp_failed(struct tape_device *device, struct tape_request *request, 866tape_3590_erp_failed(struct tape_device *device, struct tape_request *request,
@@ -1341,17 +1392,12 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1341static int tape_3590_crypt_error(struct tape_device *device, 1392static int tape_3590_crypt_error(struct tape_device *device,
1342 struct tape_request *request, struct irb *irb) 1393 struct tape_request *request, struct irb *irb)
1343{ 1394{
1344 u8 cu_rc, ekm_rc1; 1395 u8 cu_rc;
1345 u16 ekm_rc2; 1396 u16 ekm_rc2;
1346 u32 drv_rc;
1347 const char *bus_id;
1348 char *sense; 1397 char *sense;
1349 1398
1350 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; 1399 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1351 bus_id = dev_name(&device->cdev->dev);
1352 cu_rc = sense[0]; 1400 cu_rc = sense[0];
1353 drv_rc = *((u32*) &sense[5]) & 0xffffff;
1354 ekm_rc1 = sense[9];
1355 ekm_rc2 = *((u16*) &sense[10]); 1401 ekm_rc2 = *((u16*) &sense[10]);
1356 if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) 1402 if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
1357 /* key not defined on EKM */ 1403 /* key not defined on EKM */
@@ -1376,7 +1422,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1376 struct irb *irb) 1422 struct irb *irb)
1377{ 1423{
1378 struct tape_3590_sense *sense; 1424 struct tape_3590_sense *sense;
1379 int rc;
1380 1425
1381#ifdef CONFIG_S390_TAPE_BLOCK 1426#ifdef CONFIG_S390_TAPE_BLOCK
1382 if (request->op == TO_BLOCK) { 1427 if (request->op == TO_BLOCK) {
@@ -1401,7 +1446,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1401 * - "break": basic error recovery is done 1446 * - "break": basic error recovery is done
1402 * - "goto out:": just print error message if available 1447 * - "goto out:": just print error message if available
1403 */ 1448 */
1404 rc = -EIO;
1405 switch (sense->rc_rqc) { 1449 switch (sense->rc_rqc) {
1406 1450
1407 case 0x1110: 1451 case 0x1110:
@@ -1629,7 +1673,7 @@ fail_kmalloc:
1629static void 1673static void
1630tape_3590_cleanup_device(struct tape_device *device) 1674tape_3590_cleanup_device(struct tape_device *device)
1631{ 1675{
1632 flush_scheduled_work(); 1676 flush_workqueue(tape_3590_wq);
1633 tape_std_unassign(device); 1677 tape_std_unassign(device);
1634 1678
1635 kfree(device->discdata); 1679 kfree(device->discdata);
@@ -1708,8 +1752,10 @@ tape_3590_online(struct ccw_device *cdev)
1708} 1752}
1709 1753
1710static struct ccw_driver tape_3590_driver = { 1754static struct ccw_driver tape_3590_driver = {
1711 .name = "tape_3590", 1755 .driver = {
1712 .owner = THIS_MODULE, 1756 .name = "tape_3590",
1757 .owner = THIS_MODULE,
1758 },
1713 .ids = tape_3590_ids, 1759 .ids = tape_3590_ids,
1714 .probe = tape_generic_probe, 1760 .probe = tape_generic_probe,
1715 .remove = tape_generic_remove, 1761 .remove = tape_generic_remove,
@@ -1733,11 +1779,17 @@ tape_3590_init(void)
1733#endif 1779#endif
1734 1780
1735 DBF_EVENT(3, "3590 init\n"); 1781 DBF_EVENT(3, "3590 init\n");
1782
1783 tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
1784 if (!tape_3590_wq)
1785 return -ENOMEM;
1786
1736 /* Register driver for 3590 tapes. */ 1787 /* Register driver for 3590 tapes. */
1737 rc = ccw_driver_register(&tape_3590_driver); 1788 rc = ccw_driver_register(&tape_3590_driver);
1738 if (rc) 1789 if (rc) {
1790 destroy_workqueue(tape_3590_wq);
1739 DBF_EVENT(3, "3590 init failed\n"); 1791 DBF_EVENT(3, "3590 init failed\n");
1740 else 1792 } else
1741 DBF_EVENT(3, "3590 registered\n"); 1793 DBF_EVENT(3, "3590 registered\n");
1742 return rc; 1794 return rc;
1743} 1795}
@@ -1746,7 +1798,7 @@ static void
1746tape_3590_exit(void) 1798tape_3590_exit(void)
1747{ 1799{
1748 ccw_driver_unregister(&tape_3590_driver); 1800 ccw_driver_unregister(&tape_3590_driver);
1749 1801 destroy_workqueue(tape_3590_wq);
1750 debug_unregister(TAPE_DBF_AREA); 1802 debug_unregister(TAPE_DBF_AREA);
1751} 1803}
1752 1804
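On top of the sync/async split, the 3590 driver moves its deferred actions from the system workqueue to a private tape_3590_wq, so device cleanup can flush exactly its own pending work with flush_workqueue() instead of the global flush_scheduled_work(). The lifecycle, reduced to a sketch with hypothetical names:

static struct workqueue_struct *foo_wq;

static int __init foo_init(void)
{
        foo_wq = alloc_workqueue("foo", 0, 0);
        if (!foo_wq)
                return -ENOMEM;
        /* register the driver; on failure destroy_workqueue(foo_wq) */
        return 0;
}

/* deferred actions use queue_work(foo_wq, &work), not schedule_work() */

static void __exit foo_exit(void)
{
        /* unregister the driver first, then: */
        destroy_workqueue(foo_wq);      /* drains pending work and frees it */
}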
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
deleted file mode 100644
index 85cf607fc78f..000000000000
--- a/drivers/s390/char/tape_block.c
+++ /dev/null
@@ -1,444 +0,0 @@
1/*
2 * drivers/s390/char/tape_block.c
3 * block device frontend for tape device driver
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Stefan Bader <shbader@de.ibm.com>
11 */
12
13#define KMSG_COMPONENT "tape"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
16#include <linux/fs.h>
17#include <linux/module.h>
18#include <linux/blkdev.h>
19#include <linux/smp_lock.h>
20#include <linux/interrupt.h>
21#include <linux/buffer_head.h>
22#include <linux/kernel.h>
23
24#include <asm/debug.h>
25
26#define TAPE_DBF_AREA tape_core_dbf
27
28#include "tape.h"
29
30#define TAPEBLOCK_MAX_SEC 100
31#define TAPEBLOCK_MIN_REQUEUE 3
32
33/*
34 * 2003/11/25 Stefan Bader <shbader@de.ibm.com>
35 *
36 * In 2.5/2.6 the block device request function is very likely to be called
37 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
38 * just call any function that tries to allocate CCW requests from that con-
39 * text since it might sleep. There are two choices to work around this:
40 * a) do not allocate with kmalloc but use its own memory pool
41 * b) take requests from the queue outside that context, knowing that
42 * allocation might sleep
43 */
44
45/*
46 * file operation structure for tape block frontend
47 */
48static int tapeblock_open(struct block_device *, fmode_t);
49static int tapeblock_release(struct gendisk *, fmode_t);
50static int tapeblock_medium_changed(struct gendisk *);
51static int tapeblock_revalidate_disk(struct gendisk *);
52
53static const struct block_device_operations tapeblock_fops = {
54 .owner = THIS_MODULE,
55 .open = tapeblock_open,
56 .release = tapeblock_release,
57 .media_changed = tapeblock_medium_changed,
58 .revalidate_disk = tapeblock_revalidate_disk,
59};
60
61static int tapeblock_major = 0;
62
63static void
64tapeblock_trigger_requeue(struct tape_device *device)
65{
66 /* Protect against rescheduling. */
67 if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
68 return;
69 schedule_work(&device->blk_data.requeue_task);
70}
71
72/*
73 * Post finished request.
74 */
75static void
76__tapeblock_end_request(struct tape_request *ccw_req, void *data)
77{
78 struct tape_device *device;
79 struct request *req;
80
81 DBF_LH(6, "__tapeblock_end_request()\n");
82
83 device = ccw_req->device;
84 req = (struct request *) data;
85 blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
86 if (ccw_req->rc == 0)
87 /* Update position. */
88 device->blk_data.block_position =
89 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
90 else
91 /* We lost the position information due to an error. */
92 device->blk_data.block_position = -1;
93 device->discipline->free_bread(ccw_req);
94 if (!list_empty(&device->req_queue) ||
95 blk_peek_request(device->blk_data.request_queue))
96 tapeblock_trigger_requeue(device);
97}
98
99/*
100 * Feed the tape device CCW queue with requests supplied in a list.
101 */
102static int
103tapeblock_start_request(struct tape_device *device, struct request *req)
104{
105 struct tape_request * ccw_req;
106 int rc;
107
108 DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);
109
110 ccw_req = device->discipline->bread(device, req);
111 if (IS_ERR(ccw_req)) {
112 DBF_EVENT(1, "TBLOCK: bread failed\n");
113 blk_end_request_all(req, -EIO);
114 return PTR_ERR(ccw_req);
115 }
116 ccw_req->callback = __tapeblock_end_request;
117 ccw_req->callback_data = (void *) req;
118 ccw_req->retries = TAPEBLOCK_RETRIES;
119
120 rc = tape_do_io_async(device, ccw_req);
121 if (rc) {
122 /*
123 * Start/enqueueing failed. No retries in
124 * this case.
125 */
126 blk_end_request_all(req, -EIO);
127 device->discipline->free_bread(ccw_req);
128 }
129
130 return rc;
131}
132
133/*
134 * Move requests from the block device request queue to the tape device ccw
135 * queue.
136 */
137static void
138tapeblock_requeue(struct work_struct *work) {
139 struct tape_blk_data * blkdat;
140 struct tape_device * device;
141 struct request_queue * queue;
142 int nr_queued;
143 struct request * req;
144 struct list_head * l;
145 int rc;
146
147 blkdat = container_of(work, struct tape_blk_data, requeue_task);
148 device = blkdat->device;
149 if (!device)
150 return;
151
152 spin_lock_irq(get_ccwdev_lock(device->cdev));
153 queue = device->blk_data.request_queue;
154
155 /* Count number of requests on ccw queue. */
156 nr_queued = 0;
157 list_for_each(l, &device->req_queue)
158 nr_queued++;
159 spin_unlock(get_ccwdev_lock(device->cdev));
160
161 spin_lock_irq(&device->blk_data.request_queue_lock);
162 while (
163 !blk_queue_plugged(queue) &&
164 blk_peek_request(queue) &&
165 nr_queued < TAPEBLOCK_MIN_REQUEUE
166 ) {
167 req = blk_fetch_request(queue);
168 if (rq_data_dir(req) == WRITE) {
169 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
170 spin_unlock_irq(&device->blk_data.request_queue_lock);
171 blk_end_request_all(req, -EIO);
172 spin_lock_irq(&device->blk_data.request_queue_lock);
173 continue;
174 }
175 nr_queued++;
176 spin_unlock_irq(&device->blk_data.request_queue_lock);
177 rc = tapeblock_start_request(device, req);
178 spin_lock_irq(&device->blk_data.request_queue_lock);
179 }
180 spin_unlock_irq(&device->blk_data.request_queue_lock);
181 atomic_set(&device->blk_data.requeue_scheduled, 0);
182}
183
184/*
185 * Tape request queue function. Called from ll_rw_blk.c
186 */
187static void
188tapeblock_request_fn(struct request_queue *queue)
189{
190 struct tape_device *device;
191
192 device = (struct tape_device *) queue->queuedata;
193 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
194 BUG_ON(device == NULL);
195 tapeblock_trigger_requeue(device);
196}
197
198/*
199 * This function is called for every new tapedevice
200 */
201int
202tapeblock_setup_device(struct tape_device * device)
203{
204 struct tape_blk_data * blkdat;
205 struct gendisk * disk;
206 int rc;
207
208 blkdat = &device->blk_data;
209 blkdat->device = device;
210 spin_lock_init(&blkdat->request_queue_lock);
211 atomic_set(&blkdat->requeue_scheduled, 0);
212
213 blkdat->request_queue = blk_init_queue(
214 tapeblock_request_fn,
215 &blkdat->request_queue_lock
216 );
217 if (!blkdat->request_queue)
218 return -ENOMEM;
219
220 rc = elevator_change(blkdat->request_queue, "noop");
221 if (rc)
222 goto cleanup_queue;
223
224 blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
225 blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
226 blk_queue_max_segments(blkdat->request_queue, -1L);
227 blk_queue_max_segment_size(blkdat->request_queue, -1L);
228 blk_queue_segment_boundary(blkdat->request_queue, -1L);
229
230 disk = alloc_disk(1);
231 if (!disk) {
232 rc = -ENOMEM;
233 goto cleanup_queue;
234 }
235
236 disk->major = tapeblock_major;
237 disk->first_minor = device->first_minor;
238 disk->fops = &tapeblock_fops;
239 disk->private_data = tape_get_device(device);
240 disk->queue = blkdat->request_queue;
241 set_capacity(disk, 0);
242 sprintf(disk->disk_name, "btibm%d",
243 device->first_minor / TAPE_MINORS_PER_DEV);
244
245 blkdat->disk = disk;
246 blkdat->medium_changed = 1;
247 blkdat->request_queue->queuedata = tape_get_device(device);
248
249 add_disk(disk);
250
251 tape_get_device(device);
252 INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
253
254 return 0;
255
256cleanup_queue:
257 blk_cleanup_queue(blkdat->request_queue);
258 blkdat->request_queue = NULL;
259
260 return rc;
261}
262
263void
264tapeblock_cleanup_device(struct tape_device *device)
265{
266 flush_scheduled_work();
267 tape_put_device(device);
268
269 if (!device->blk_data.disk) {
270 goto cleanup_queue;
271 }
272
273 del_gendisk(device->blk_data.disk);
274 device->blk_data.disk->private_data = NULL;
275 tape_put_device(device);
276 put_disk(device->blk_data.disk);
277
278 device->blk_data.disk = NULL;
279cleanup_queue:
280 device->blk_data.request_queue->queuedata = NULL;
281 tape_put_device(device);
282
283 blk_cleanup_queue(device->blk_data.request_queue);
284 device->blk_data.request_queue = NULL;
285}
286
287/*
288 * Detect number of blocks of the tape.
289 * FIXME: can we extent this to detect the blocks size as well ?
290 */
291static int
292tapeblock_revalidate_disk(struct gendisk *disk)
293{
294 struct tape_device * device;
295 unsigned int nr_of_blks;
296 int rc;
297
298 device = (struct tape_device *) disk->private_data;
299 BUG_ON(!device);
300
301 if (!device->blk_data.medium_changed)
302 return 0;
303
304 rc = tape_mtop(device, MTFSFM, 1);
305 if (rc)
306 return rc;
307
308 rc = tape_mtop(device, MTTELL, 1);
309 if (rc < 0)
310 return rc;
311
312 pr_info("%s: Determining the size of the recorded area...\n",
313 dev_name(&device->cdev->dev));
314 DBF_LH(3, "Image file ends at %d\n", rc);
315 nr_of_blks = rc;
316
317 /* This will fail for the first file. Catch the error by checking the
318 * position. */
319 tape_mtop(device, MTBSF, 1);
320
321 rc = tape_mtop(device, MTTELL, 1);
322 if (rc < 0)
323 return rc;
324
325 if (rc > nr_of_blks)
326 return -EINVAL;
327
328 DBF_LH(3, "Image file starts at %d\n", rc);
329 device->bof = rc;
330 nr_of_blks -= rc;
331
332 pr_info("%s: The size of the recorded area is %i blocks\n",
333 dev_name(&device->cdev->dev), nr_of_blks);
334 set_capacity(device->blk_data.disk,
335 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
336
337 device->blk_data.block_position = 0;
338 device->blk_data.medium_changed = 0;
339 return 0;
340}
341
342static int
343tapeblock_medium_changed(struct gendisk *disk)
344{
345 struct tape_device *device;
346
347 device = (struct tape_device *) disk->private_data;
348 DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
349 device, device->blk_data.medium_changed);
350
351 return device->blk_data.medium_changed;
352}
353
354/*
355 * Block frontend tape device open function.
356 */
357static int
358tapeblock_open(struct block_device *bdev, fmode_t mode)
359{
360 struct gendisk * disk = bdev->bd_disk;
361 struct tape_device * device;
362 int rc;
363
364 lock_kernel();
365 device = tape_get_device(disk->private_data);
366
367 if (device->required_tapemarks) {
368 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
369 pr_warning("%s: Opening the tape failed because of missing "
370 "end-of-file marks\n", dev_name(&device->cdev->dev));
371 rc = -EPERM;
372 goto put_device;
373 }
374
375 rc = tape_open(device);
376 if (rc)
377 goto put_device;
378
379 rc = tapeblock_revalidate_disk(disk);
380 if (rc)
381 goto release;
382
383 /*
384 * Note: The reference to <device> is hold until the release function
385 * is called.
386 */
387 tape_state_set(device, TS_BLKUSE);
388 unlock_kernel();
389 return 0;
390
391release:
392 tape_release(device);
393 put_device:
394 tape_put_device(device);
395 unlock_kernel();
396 return rc;
397}
398
399/*
400 * Block frontend tape device release function.
401 *
402 * Note: One reference to the tape device was made by the open function. So
403 * we just get the pointer here and release the reference.
404 */
405static int
406tapeblock_release(struct gendisk *disk, fmode_t mode)
407{
408 struct tape_device *device = disk->private_data;
409
410 lock_kernel();
411 tape_state_set(device, TS_IN_USE);
412 tape_release(device);
413 tape_put_device(device);
414 unlock_kernel();
415
416 return 0;
417}
418
419/*
420 * Initialize block device frontend.
421 */
422int
423tapeblock_init(void)
424{
425 int rc;
426
427 /* Register the tape major number to the kernel */
428 rc = register_blkdev(tapeblock_major, "tBLK");
429 if (rc < 0)
430 return rc;
431
432 if (tapeblock_major == 0)
433 tapeblock_major = rc;
434 return 0;
435}
436
437/*
438 * Deregister major for block device frontend
439 */
440void
441tapeblock_exit(void)
442{
443 unregister_blkdev(tapeblock_major, "tBLK");
444}
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 539045acaad4..87cd0ab242de 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -17,7 +17,6 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19#include <linux/mtio.h> 19#include <linux/mtio.h>
20#include <linux/smp_lock.h>
21#include <linux/compat.h> 20#include <linux/compat.h>
22 21
23#include <asm/uaccess.h> 22#include <asm/uaccess.h>
@@ -53,6 +52,7 @@ static const struct file_operations tape_fops =
53#endif 52#endif
54 .open = tapechar_open, 53 .open = tapechar_open,
55 .release = tapechar_release, 54 .release = tapechar_release,
55 .llseek = no_llseek,
56}; 56};
57 57
58static int tapechar_major = TAPECHAR_MAJOR; 58static int tapechar_major = TAPECHAR_MAJOR;
@@ -139,7 +139,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
139 /* 139 /*
140 * If the tape isn't terminated yet, do it now. And since we then 140 * If the tape isn't terminated yet, do it now. And since we then
141 * are at the end of the tape there wouldn't be anything to read 141 * are at the end of the tape there wouldn't be anything to read
142 * anyways. So we return immediatly. 142 * anyways. So we return immediately.
143 */ 143 */
144 if(device->required_tapemarks) { 144 if(device->required_tapemarks) {
145 return tape_std_terminate_write(device); 145 return tape_std_terminate_write(device);
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index 707b7f48c232..9e32780c317f 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -14,7 +14,6 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/major.h> 16#include <linux/major.h>
17#include <linux/kobject.h>
18#include <linux/kobj_map.h> 17#include <linux/kobj_map.h>
19#include <linux/cdev.h> 18#include <linux/cdev.h>
20 19
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 29c2d73d719d..7978a0adeaf3 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -14,6 +14,7 @@
14#define KMSG_COMPONENT "tape" 14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 16
17#include <linux/kernel_stat.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> // for kernel parameters 19#include <linux/init.h> // for kernel parameters
19#include <linux/kmod.h> // for requesting modules 20#include <linux/kmod.h> // for requesting modules
@@ -209,29 +210,79 @@ tape_state_set(struct tape_device *device, enum tape_state newstate)
209 wake_up(&device->state_change_wq); 210 wake_up(&device->state_change_wq);
210} 211}
211 212
213struct tape_med_state_work_data {
214 struct tape_device *device;
215 enum tape_medium_state state;
216 struct work_struct work;
217};
218
219static void
220tape_med_state_work_handler(struct work_struct *work)
221{
222 static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
223 static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
224 struct tape_med_state_work_data *p =
225 container_of(work, struct tape_med_state_work_data, work);
226 struct tape_device *device = p->device;
227 char *envp[] = { NULL, NULL };
228
229 switch (p->state) {
230 case MS_UNLOADED:
231 pr_info("%s: The tape cartridge has been successfully "
232 "unloaded\n", dev_name(&device->cdev->dev));
233 envp[0] = env_state_unloaded;
234 kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
235 break;
236 case MS_LOADED:
237 pr_info("%s: A tape cartridge has been mounted\n",
238 dev_name(&device->cdev->dev));
239 envp[0] = env_state_loaded;
240 kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
241 break;
242 default:
243 break;
244 }
245 tape_put_device(device);
246 kfree(p);
247}
248
249static void
250tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
251{
252 struct tape_med_state_work_data *p;
253
254 p = kzalloc(sizeof(*p), GFP_ATOMIC);
255 if (p) {
256 INIT_WORK(&p->work, tape_med_state_work_handler);
257 p->device = tape_get_device(device);
258 p->state = state;
259 schedule_work(&p->work);
260 }
261}
262
212void 263void
213tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) 264tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
214{ 265{
215 if (device->medium_state == newstate) 266 enum tape_medium_state oldstate;
267
268 oldstate = device->medium_state;
269 if (oldstate == newstate)
216 return; 270 return;
271 device->medium_state = newstate;
217 switch(newstate){ 272 switch(newstate){
218 case MS_UNLOADED: 273 case MS_UNLOADED:
219 device->tape_generic_status |= GMT_DR_OPEN(~0); 274 device->tape_generic_status |= GMT_DR_OPEN(~0);
220 if (device->medium_state == MS_LOADED) 275 if (oldstate == MS_LOADED)
221 pr_info("%s: The tape cartridge has been successfully " 276 tape_med_state_work(device, MS_UNLOADED);
222 "unloaded\n", dev_name(&device->cdev->dev));
223 break; 277 break;
224 case MS_LOADED: 278 case MS_LOADED:
225 device->tape_generic_status &= ~GMT_DR_OPEN(~0); 279 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
226 if (device->medium_state == MS_UNLOADED) 280 if (oldstate == MS_UNLOADED)
227 pr_info("%s: A tape cartridge has been mounted\n", 281 tape_med_state_work(device, MS_LOADED);
228 dev_name(&device->cdev->dev));
229 break; 282 break;
230 default: 283 default:
231 // print nothing
232 break; 284 break;
233 } 285 }
234 device->medium_state = newstate;
235 wake_up(&device->state_change_wq); 286 wake_up(&device->state_change_wq);
236} 287}
237 288
@@ -1064,6 +1115,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1064 struct tape_request *request; 1115 struct tape_request *request;
1065 int rc; 1116 int rc;
1066 1117
1118 kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
1067 device = dev_get_drvdata(&cdev->dev); 1119 device = dev_get_drvdata(&cdev->dev);
1068 if (device == NULL) { 1120 if (device == NULL) {
1069 return; 1121 return;
@@ -1077,15 +1129,14 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1077 /* FIXME: What to do with the request? */ 1129 /* FIXME: What to do with the request? */
1078 switch (PTR_ERR(irb)) { 1130 switch (PTR_ERR(irb)) {
1079 case -ETIMEDOUT: 1131 case -ETIMEDOUT:
1080 DBF_LH(1, "(%s): Request timed out\n", 1132 DBF_LH(1, "(%08x): Request timed out\n",
1081 dev_name(&cdev->dev)); 1133 device->cdev_id);
1082 case -EIO: 1134 case -EIO:
1083 __tape_end_request(device, request, -EIO); 1135 __tape_end_request(device, request, -EIO);
1084 break; 1136 break;
1085 default: 1137 default:
1086 DBF_LH(1, "(%s): Unexpected i/o error %li\n", 1138 DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
1087 dev_name(&cdev->dev), 1139 device->cdev_id, PTR_ERR(irb));
1088 PTR_ERR(irb));
1089 } 1140 }
1090 return; 1141 return;
1091 } 1142 }
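
The tape_med_state_work() hunk above moves the pr_info()/uevent calls out of
the state-change path into process context; the GFP_ATOMIC allocation
suggests the caller may run in interrupt context, where kobject_uevent() must
not be called. A minimal sketch of that deferral pattern, assuming only
generic workqueue facilities (the example_* names are illustrative):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_work {
	struct work_struct work;
	int event;
};

static void example_work_handler(struct work_struct *work)
{
	struct example_work *p = container_of(work, struct example_work, work);

	pr_info("deferred event %d\n", p->event);	/* safe to sleep here */
	kfree(p);	/* the handler owns the allocation */
}

/* May be called from interrupt context, hence GFP_ATOMIC. */
static void example_queue_event(int event)
{
	struct example_work *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (!p)
		return;	/* best effort: the notification is dropped on OOM */
	INIT_WORK(&p->work, example_work_handler);
	p->event = event;
	schedule_work(&p->work);
}

As in the hunk above, the real work item also carries a device reference
(tape_get_device()) so the device cannot go away before the handler runs;
the sketch omits that refcounting.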
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 03f07e5dd6e9..e7650170274a 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -47,8 +47,8 @@ tape_std_assign_timeout(unsigned long data)
47 device->cdev_id); 47 device->cdev_id);
48 rc = tape_cancel_io(device, request); 48 rc = tape_cancel_io(device, request);
49 if(rc) 49 if(rc)
50 DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n", 50 DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
51 dev_name(&device->cdev->dev), rc); 51 "%i\n", device->cdev_id, rc);
52} 52}
53 53
54int 54int
@@ -564,7 +564,6 @@ int
564tape_std_mtreten(struct tape_device *device, int mt_count) 564tape_std_mtreten(struct tape_device *device, int mt_count)
565{ 565{
566 struct tape_request *request; 566 struct tape_request *request;
567 int rc;
568 567
569 request = tape_alloc_request(4, 0); 568 request = tape_alloc_request(4, 0);
570 if (IS_ERR(request)) 569 if (IS_ERR(request))
@@ -576,7 +575,7 @@ tape_std_mtreten(struct tape_device *device, int mt_count)
576 tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL); 575 tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
577 tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr); 576 tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
578 /* execute it, MTRETEN rc gets ignored */ 577 /* execute it, MTRETEN rc gets ignored */
579 rc = tape_do_io_interruptible(device, request); 578 tape_do_io_interruptible(device, request);
580 tape_free_request(request); 579 tape_free_request(request);
581 return tape_mtop(device, MTREW, 1); 580 return tape_mtop(device, MTREW, 1);
582} 581}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 911822db614d..2db1482b406e 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -328,7 +328,7 @@ tty3270_write_callback(struct raw3270_request *rq, void *data)
328 328
329 tp = (struct tty3270 *) rq->view; 329 tp = (struct tty3270 *) rq->view;
330 if (rq->rc != 0) { 330 if (rq->rc != 0) {
331 /* Write wasn't successfull. Refresh all. */ 331 /* Write wasn't successful. Refresh all. */
332 tp->update_flags = TTY_UPDATE_ALL; 332 tp->update_flags = TTY_UPDATE_ALL;
333 tty3270_set_timer(tp, 1); 333 tty3270_set_timer(tp, 1);
334 } 334 }
@@ -1718,9 +1718,8 @@ tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
1718{ 1718{
1719} 1719}
1720 1720
1721static int 1721static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
1722tty3270_ioctl(struct tty_struct *tty, struct file *file, 1722 unsigned long arg)
1723 unsigned int cmd, unsigned long arg)
1724{ 1723{
1725 struct tty3270 *tp; 1724 struct tty3270 *tp;
1726 1725
@@ -1729,13 +1728,12 @@ tty3270_ioctl(struct tty_struct *tty, struct file *file,
1729 return -ENODEV; 1728 return -ENODEV;
1730 if (tty->flags & (1 << TTY_IO_ERROR)) 1729 if (tty->flags & (1 << TTY_IO_ERROR))
1731 return -EIO; 1730 return -EIO;
1732 return kbd_ioctl(tp->kbd, file, cmd, arg); 1731 return kbd_ioctl(tp->kbd, cmd, arg);
1733} 1732}
1734 1733
1735#ifdef CONFIG_COMPAT 1734#ifdef CONFIG_COMPAT
1736static long 1735static long tty3270_compat_ioctl(struct tty_struct *tty,
1737tty3270_compat_ioctl(struct tty_struct *tty, struct file *file, 1736 unsigned int cmd, unsigned long arg)
1738 unsigned int cmd, unsigned long arg)
1739{ 1737{
1740 struct tty3270 *tp; 1738 struct tty3270 *tp;
1741 1739
@@ -1744,7 +1742,7 @@ tty3270_compat_ioctl(struct tty_struct *tty, struct file *file,
1744 return -ENODEV; 1742 return -ENODEV;
1745 if (tty->flags & (1 << TTY_IO_ERROR)) 1743 if (tty->flags & (1 << TTY_IO_ERROR))
1746 return -EIO; 1744 return -EIO;
1747 return kbd_ioctl(tp->kbd, file, cmd, (unsigned long)compat_ptr(arg)); 1745 return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
1748} 1746}
1749#endif 1747#endif
1750 1748
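
The tty3270 prototype changes track the tty-layer conversion that dropped the
struct file argument from the ioctl and compat_ioctl methods. A hedged sketch
of the resulting method signatures (the example_* names are placeholders, not
part of the driver):

#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/compat.h>

static int example_ioctl(struct tty_struct *tty,
			 unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;	/* let the tty core fall back to defaults */
}

#ifdef CONFIG_COMPAT
static long example_compat_ioctl(struct tty_struct *tty,
				 unsigned int cmd, unsigned long arg)
{
	/* 32-bit user pointers must be converted with compat_ptr(). */
	return example_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct tty_operations example_ops = {
	.ioctl		= example_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= example_compat_ioctl,
#endif
};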
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 04e532eec032..31a3ccbb6495 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -47,7 +47,7 @@ static int vmcp_release(struct inode *inode, struct file *file)
47{ 47{
48 struct vmcp_session *session; 48 struct vmcp_session *session;
49 49
50 session = (struct vmcp_session *)file->private_data; 50 session = file->private_data;
51 file->private_data = NULL; 51 file->private_data = NULL;
52 free_pages((unsigned long)session->response, get_order(session->bufsize)); 52 free_pages((unsigned long)session->response, get_order(session->bufsize));
53 kfree(session); 53 kfree(session);
@@ -94,7 +94,7 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
94 return -EFAULT; 94 return -EFAULT;
95 } 95 }
96 cmd[count] = '\0'; 96 cmd[count] = '\0';
97 session = (struct vmcp_session *)file->private_data; 97 session = file->private_data;
98 if (mutex_lock_interruptible(&session->mutex)) { 98 if (mutex_lock_interruptible(&session->mutex)) {
99 kfree(cmd); 99 kfree(cmd);
100 return -ERESTARTSYS; 100 return -ERESTARTSYS;
@@ -136,7 +136,7 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
136 int __user *argp; 136 int __user *argp;
137 int temp; 137 int temp;
138 138
139 session = (struct vmcp_session *)file->private_data; 139 session = file->private_data;
140 if (is_compat_task()) 140 if (is_compat_task())
141 argp = compat_ptr(arg); 141 argp = compat_ptr(arg);
142 else 142 else
@@ -177,6 +177,7 @@ static const struct file_operations vmcp_fops = {
177 .write = vmcp_write, 177 .write = vmcp_write,
178 .unlocked_ioctl = vmcp_ioctl, 178 .unlocked_ioctl = vmcp_ioctl,
179 .compat_ioctl = vmcp_ioctl, 179 .compat_ioctl = vmcp_ioctl,
180 .llseek = no_llseek,
180}; 181};
181 182
182static struct miscdevice vmcp_dev = { 183static struct miscdevice vmcp_dev = {
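
The .llseek = no_llseek additions here and in the files below follow the
tree-wide llseek pushdown: file_operations that never use f_pos now state
their seek behaviour explicitly instead of inheriting default_llseek. A
minimal sketch, assuming a character device that ignores the file position:

#include <linux/fs.h>
#include <linux/module.h>

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	return 0;	/* EOF; a real driver would copy_to_user() here */
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.read	= example_read,
	.llseek	= no_llseek,	/* lseek() on this device fails with -ESPIPE */
};

vmwatchdog.c below uses noop_llseek instead, which accepts the seek but
leaves the position untouched.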
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index e40a1b892866..c837d7419a6a 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -30,7 +30,6 @@
30#include <linux/kmod.h> 30#include <linux/kmod.h>
31#include <linux/cdev.h> 31#include <linux/cdev.h>
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/smp_lock.h>
34#include <linux/string.h> 33#include <linux/string.h>
35 34
36MODULE_AUTHOR 35MODULE_AUTHOR
@@ -97,6 +96,7 @@ static const struct file_operations vmlogrdr_fops = {
97 .open = vmlogrdr_open, 96 .open = vmlogrdr_open,
98 .release = vmlogrdr_release, 97 .release = vmlogrdr_release,
99 .read = vmlogrdr_read, 98 .read = vmlogrdr_read,
99 .llseek = no_llseek,
100}; 100};
101 101
102 102
@@ -214,7 +214,7 @@ static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
214 214
215static int vmlogrdr_get_recording_class_AB(void) 215static int vmlogrdr_get_recording_class_AB(void)
216{ 216{
217 char cp_command[]="QUERY COMMAND RECORDING "; 217 static const char cp_command[] = "QUERY COMMAND RECORDING ";
218 char cp_response[80]; 218 char cp_response[80];
219 char *tail; 219 char *tail;
220 int len,i; 220 int len,i;
@@ -248,27 +248,25 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
248 char cp_command[80]; 248 char cp_command[80];
249 char cp_response[160]; 249 char cp_response[160];
250 char *onoff, *qid_string; 250 char *onoff, *qid_string;
251 int rc;
251 252
252 memset(cp_command, 0x00, sizeof(cp_command)); 253 onoff = ((action == 1) ? "ON" : "OFF");
253 memset(cp_response, 0x00, sizeof(cp_response));
254
255 onoff = ((action == 1) ? "ON" : "OFF");
256 qid_string = ((recording_class_AB == 1) ? " QID * " : ""); 254 qid_string = ((recording_class_AB == 1) ? " QID * " : "");
257 255
258 /* 256 /*
259 * The recording command needs to be called with option QID 257 * The recording command needs to be called with option QID
260 * for guests that have privilege classes A or B. 258 * for guests that have privilege classes A or B.
261 * Purging has to be done as a separate step, because recording 259 * Purging has to be done as a separate step, because recording
262 * can't be switched on as long as records are on the queue. 260 * can't be switched on as long as records are on the queue.
263 * Doing both at the same time doesn't work. 261 * Doing both at the same time doesn't work.
264 */ 262 */
265 263 if (purge && (action == 1)) {
266 if (purge) { 264 memset(cp_command, 0x00, sizeof(cp_command));
265 memset(cp_response, 0x00, sizeof(cp_response));
267 snprintf(cp_command, sizeof(cp_command), 266 snprintf(cp_command, sizeof(cp_command),
268 "RECORDING %s PURGE %s", 267 "RECORDING %s PURGE %s",
269 logptr->recording_name, 268 logptr->recording_name,
270 qid_string); 269 qid_string);
271
272 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 270 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
273 } 271 }
274 272
@@ -278,19 +276,33 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
278 logptr->recording_name, 276 logptr->recording_name,
279 onoff, 277 onoff,
280 qid_string); 278 qid_string);
281
282 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); 279 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
283 /* The recording command will usually answer with 'Command complete' 280 /* The recording command will usually answer with 'Command complete'
284 * on success, but when the specific service was never connected 281 * on success, but when the specific service was never connected
285 * before then there might be an additional informational message 282 * before then there might be an additional informational message
286 * 'HCPCRC8072I Recording entry not found' before the 283 * 'HCPCRC8072I Recording entry not found' before the
287 * 'Command complete'. So I use strstr rather then the strncmp. 284 * 'Command complete'. So I use strstr rather then the strncmp.
288 */ 285 */
289 if (strstr(cp_response,"Command complete")) 286 if (strstr(cp_response,"Command complete"))
290 return 0; 287 rc = 0;
291 else 288 else
292 return -EIO; 289 rc = -EIO;
290 /*
291 * If we turn recording off, we have to purge any remaining records
292 * afterwards, as a large number of queued records may impact z/VM
293 * performance.
294 */
295 if (purge && (action == 0)) {
296 memset(cp_command, 0x00, sizeof(cp_command));
297 memset(cp_response, 0x00, sizeof(cp_response));
298 snprintf(cp_command, sizeof(cp_command),
299 "RECORDING %s PURGE %s",
300 logptr->recording_name,
301 qid_string);
302 cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
303 }
293 304
305 return rc;
294} 306}
295 307
296 308
@@ -637,7 +649,7 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
637 char *buf) 649 char *buf)
638{ 650{
639 651
640 char cp_command[] = "QUERY RECORDING "; 652 static const char cp_command[] = "QUERY RECORDING ";
641 int len; 653 int len;
642 654
643 cpcmd(cp_command, buf, 4096, NULL); 655 cpcmd(cp_command, buf, 4096, NULL);
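
Both vmlogrdr hunks funnel their CP commands through cpcmd(), whose usage is
visible above: command string, response buffer, buffer length, and an
optional response-code pointer that may be NULL. A minimal sketch of that
round-trip, with an illustrative command string:

#include <asm/cpcmd.h>

/* Issue a CP command and capture its response text. */
static void example_query_recording(char *buf, int len)
{
	static const char cp_command[] = "QUERY RECORDING ";

	cpcmd(cp_command, buf, len, NULL);	/* response code not needed */
}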
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 1de672f21037..f6b00c3df425 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -11,9 +11,9 @@
11#define KMSG_COMPONENT "vmur" 11#define KMSG_COMPONENT "vmur"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 13
14#include <linux/kernel_stat.h>
14#include <linux/cdev.h> 15#include <linux/cdev.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/smp_lock.h>
17 17
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19#include <asm/cio.h> 19#include <asm/cio.h>
@@ -64,8 +64,10 @@ static int ur_set_offline(struct ccw_device *cdev);
64static int ur_pm_suspend(struct ccw_device *cdev); 64static int ur_pm_suspend(struct ccw_device *cdev);
65 65
66static struct ccw_driver ur_driver = { 66static struct ccw_driver ur_driver = {
67 .name = "vmur", 67 .driver = {
68 .owner = THIS_MODULE, 68 .name = "vmur",
69 .owner = THIS_MODULE,
70 },
69 .ids = ur_ids, 71 .ids = ur_ids,
70 .probe = ur_probe, 72 .probe = ur_probe,
71 .remove = ur_remove, 73 .remove = ur_remove,
@@ -303,6 +305,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
303{ 305{
304 struct urdev *urd; 306 struct urdev *urd;
305 307
308 kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
306 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", 309 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
307 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, 310 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
308 irb->scsw.cmd.count); 311 irb->scsw.cmd.count);
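
The ur_driver hunk reflects the reworked ccw_driver layout: the name and
module owner now live in the embedded struct device_driver rather than as
top-level ccw_driver fields. A sketch of the post-change initializer
(remaining callbacks elided; the example name is illustrative):

#include <linux/module.h>
#include <asm/ccwdev.h>

static struct ccw_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
	/* .ids, .probe, .remove, .set_online, ... as before */
};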
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index e13508c98b1a..12ef9121d4f0 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -297,6 +297,7 @@ static const struct file_operations vmwdt_fops = {
297 .unlocked_ioctl = &vmwdt_ioctl, 297 .unlocked_ioctl = &vmwdt_ioctl,
298 .write = &vmwdt_write, 298 .write = &vmwdt_write,
299 .owner = THIS_MODULE, 299 .owner = THIS_MODULE,
300 .llseek = noop_llseek,
300}; 301};
301 302
302static struct miscdevice vmwdt_dev = { 303static struct miscdevice vmwdt_dev = {
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f5ea3384a4b9..3b94044027c2 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -459,6 +459,7 @@ static const struct file_operations zcore_memmap_fops = {
459 .read = zcore_memmap_read, 459 .read = zcore_memmap_read,
460 .open = zcore_memmap_open, 460 .open = zcore_memmap_open,
461 .release = zcore_memmap_release, 461 .release = zcore_memmap_release,
462 .llseek = no_llseek,
462}; 463};
463 464
464static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, 465static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
@@ -486,6 +487,7 @@ static const struct file_operations zcore_reipl_fops = {
486 .write = zcore_reipl_write, 487 .write = zcore_reipl_write,
487 .open = zcore_reipl_open, 488 .open = zcore_reipl_open,
488 .release = zcore_reipl_release, 489 .release = zcore_reipl_release,
490 .llseek = no_llseek,
489}; 491};
490 492
491#ifdef CONFIG_32BIT 493#ifdef CONFIG_32BIT
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 13cb60162e42..76058a5166ed 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -79,17 +79,15 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit,
79 int max_digit, int max_val) 79 int max_digit, int max_val)
80{ 80{
81 int diff; 81 int diff;
82 unsigned int value;
83 82
84 diff = 0; 83 diff = 0;
85 *val = 0; 84 *val = 0;
86 85
87 while (isxdigit(**cp) && (diff <= max_digit)) { 86 while (diff <= max_digit) {
87 int value = hex_to_bin(**cp);
88 88
89 if (isdigit(**cp)) 89 if (value < 0)
90 value = **cp - '0'; 90 break;
91 else
92 value = tolower(**cp) - 'a' + 10;
93 *val = *val * 16 + value; 91 *val = *val * 16 + value;
94 (*cp)++; 92 (*cp)++;
95 diff++; 93 diff++;
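
The rewritten loop folds digit classification and conversion into
hex_to_bin(), which returns 0-15 for a hex digit and -1 otherwise, so the
isxdigit()/tolower() arithmetic becomes unnecessary. A self-contained sketch
of the same parsing idiom (simplified: no min_digit/max_val checks):

#include <linux/kernel.h>

/* Accumulate up to max_digit hex digits, stopping at the first non-digit. */
static int example_parse_hex(const char **cp, unsigned int *val, int max_digit)
{
	int digits = 0;

	*val = 0;
	while (digits < max_digit) {
		int value = hex_to_bin(**cp);	/* -1 for a non-hex character */

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		digits++;
	}
	return digits;
}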
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 97b25d68e3e7..5c567414c4bb 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -67,6 +67,27 @@ __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
67} 67}
68 68
69/* 69/*
70 * Remove references from ccw devices to ccw group device and from
71 * ccw group device to ccw devices.
72 */
73static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
74{
75 struct ccw_device *cdev;
76 int i;
77
78 for (i = 0; i < gdev->count; i++) {
79 cdev = gdev->cdev[i];
80 if (!cdev)
81 continue;
82 spin_lock_irq(cdev->ccwlock);
83 dev_set_drvdata(&cdev->dev, NULL);
84 spin_unlock_irq(cdev->ccwlock);
85 gdev->cdev[i] = NULL;
86 put_device(&cdev->dev);
87 }
88}
89
90/*
70 * Provide an 'ungroup' attribute so the user can remove group devices no 91 * Provide an 'ungroup' attribute so the user can remove group devices no
71 * longer needed or accidentally created. Saves memory :) 92 * longer needed or accidentally created. Saves memory :)
72 */ 93 */
@@ -78,6 +99,7 @@ static void ccwgroup_ungroup_callback(struct device *dev)
78 if (device_is_registered(&gdev->dev)) { 99 if (device_is_registered(&gdev->dev)) {
79 __ccwgroup_remove_symlinks(gdev); 100 __ccwgroup_remove_symlinks(gdev);
80 device_unregister(dev); 101 device_unregister(dev);
102 __ccwgroup_remove_cdev_refs(gdev);
81 } 103 }
82 mutex_unlock(&gdev->reg_mutex); 104 mutex_unlock(&gdev->reg_mutex);
83} 105}
@@ -116,21 +138,7 @@ static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
116static void 138static void
117ccwgroup_release (struct device *dev) 139ccwgroup_release (struct device *dev)
118{ 140{
119 struct ccwgroup_device *gdev; 141 kfree(to_ccwgroupdev(dev));
120 int i;
121
122 gdev = to_ccwgroupdev(dev);
123
124 for (i = 0; i < gdev->count; i++) {
125 if (gdev->cdev[i]) {
126 spin_lock_irq(gdev->cdev[i]->ccwlock);
127 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
128 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
129 spin_unlock_irq(gdev->cdev[i]->ccwlock);
130 put_device(&gdev->cdev[i]->dev);
131 }
132 }
133 kfree(gdev);
134} 142}
135 143
136static int 144static int
@@ -420,7 +428,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
420 gdev = to_ccwgroupdev(dev); 428 gdev = to_ccwgroupdev(dev);
421 gdrv = to_ccwgroupdrv(dev->driver); 429 gdrv = to_ccwgroupdrv(dev->driver);
422 430
423 if (!try_module_get(gdrv->owner)) 431 if (!try_module_get(gdrv->driver.owner))
424 return -EINVAL; 432 return -EINVAL;
425 433
426 ret = strict_strtoul(buf, 0, &value); 434 ret = strict_strtoul(buf, 0, &value);
@@ -434,7 +442,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
434 else 442 else
435 ret = -EINVAL; 443 ret = -EINVAL;
436out: 444out:
437 module_put(gdrv->owner); 445 module_put(gdrv->driver.owner);
438 return (ret == 0) ? count : ret; 446 return (ret == 0) ? count : ret;
439} 447}
440 448
@@ -608,8 +616,6 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
608{ 616{
609 /* register our new driver with the core */ 617 /* register our new driver with the core */
610 cdriver->driver.bus = &ccwgroup_bus_type; 618 cdriver->driver.bus = &ccwgroup_bus_type;
611 cdriver->driver.name = cdriver->name;
612 cdriver->driver.owner = cdriver->owner;
613 619
614 return driver_register(&cdriver->driver); 620 return driver_register(&cdriver->driver);
615} 621}
@@ -639,6 +645,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
639 mutex_lock(&gdev->reg_mutex); 645 mutex_lock(&gdev->reg_mutex);
640 __ccwgroup_remove_symlinks(gdev); 646 __ccwgroup_remove_symlinks(gdev);
641 device_unregister(dev); 647 device_unregister(dev);
648 __ccwgroup_remove_cdev_refs(gdev);
642 mutex_unlock(&gdev->reg_mutex); 649 mutex_unlock(&gdev->reg_mutex);
643 put_device(dev); 650 put_device(dev);
644 } 651 }
@@ -660,25 +667,6 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
660 return 0; 667 return 0;
661} 668}
662 669
663static struct ccwgroup_device *
664__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
665{
666 struct ccwgroup_device *gdev;
667
668 gdev = dev_get_drvdata(&cdev->dev);
669 if (gdev) {
670 if (get_device(&gdev->dev)) {
671 mutex_lock(&gdev->reg_mutex);
672 if (device_is_registered(&gdev->dev))
673 return gdev;
674 mutex_unlock(&gdev->reg_mutex);
675 put_device(&gdev->dev);
676 }
677 return NULL;
678 }
679 return NULL;
680}
681
682/** 670/**
683 * ccwgroup_remove_ccwdev() - remove function for slave devices 671 * ccwgroup_remove_ccwdev() - remove function for slave devices
684 * @cdev: ccw device to be removed 672 * @cdev: ccw device to be removed
@@ -694,13 +682,25 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
694 /* Ignore offlining errors, device is gone anyway. */ 682 /* Ignore offlining errors, device is gone anyway. */
695 ccw_device_set_offline(cdev); 683 ccw_device_set_offline(cdev);
696 /* If one of its devices is gone, the whole group is done for. */ 684 /* If one of its devices is gone, the whole group is done for. */
697 gdev = __ccwgroup_get_gdev_by_cdev(cdev); 685 spin_lock_irq(cdev->ccwlock);
698 if (gdev) { 686 gdev = dev_get_drvdata(&cdev->dev);
687 if (!gdev) {
688 spin_unlock_irq(cdev->ccwlock);
689 return;
690 }
691 /* Get ccwgroup device reference for local processing. */
692 get_device(&gdev->dev);
693 spin_unlock_irq(cdev->ccwlock);
694 /* Unregister group device. */
695 mutex_lock(&gdev->reg_mutex);
696 if (device_is_registered(&gdev->dev)) {
699 __ccwgroup_remove_symlinks(gdev); 697 __ccwgroup_remove_symlinks(gdev);
700 device_unregister(&gdev->dev); 698 device_unregister(&gdev->dev);
701 mutex_unlock(&gdev->reg_mutex); 699 __ccwgroup_remove_cdev_refs(gdev);
702 put_device(&gdev->dev);
703 } 700 }
701 mutex_unlock(&gdev->reg_mutex);
702 /* Release ccwgroup device reference for local processing. */
703 put_device(&gdev->dev);
704} 704}
705 705
706MODULE_LICENSE("GPL"); 706MODULE_LICENSE("GPL");
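
The reworked ccwgroup_remove_ccwdev() illustrates a common
lookup-with-reference idiom: read the back-pointer and pin it with
get_device() while holding the spinlock that protects it, then drop the lock
before the heavyweight unregister work. A minimal sketch of just that step
(generic device and lock names, not the ccw types):

#include <linux/device.h>
#include <linux/spinlock.h>

static struct device *example_get_parent_ref(struct device *dev,
					     spinlock_t *lock)
{
	struct device *parent;

	spin_lock_irq(lock);
	parent = dev_get_drvdata(dev);
	if (parent)
		get_device(parent);	/* pin it before dropping the lock */
	spin_unlock_irq(lock);

	return parent;	/* caller must put_device() when done */
}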
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 6c9fa15aac7b..2d32233943a9 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/chp.c 2 * drivers/s390/cio/chp.c
3 * 3 *
4 * Copyright IBM Corp. 1999,2007 4 * Copyright IBM Corp. 1999,2010
5 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 5 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
6 * Arnd Bergmann (arndb@de.ibm.com) 6 * Arnd Bergmann (arndb@de.ibm.com)
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -54,12 +54,6 @@ static struct work_struct cfg_work;
54/* Wait queue for configure completion events. */ 54/* Wait queue for configure completion events. */
55static wait_queue_head_t cfg_wait_queue; 55static wait_queue_head_t cfg_wait_queue;
56 56
57/* Return channel_path struct for given chpid. */
58static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
59{
60 return channel_subsystems[chpid.cssid]->chps[chpid.id];
61}
62
63/* Set vary state for given chpid. */ 57/* Set vary state for given chpid. */
64static void set_chp_logically_online(struct chp_id chpid, int onoff) 58static void set_chp_logically_online(struct chp_id chpid, int onoff)
65{ 59{
@@ -241,11 +235,13 @@ static ssize_t chp_status_show(struct device *dev,
241 struct device_attribute *attr, char *buf) 235 struct device_attribute *attr, char *buf)
242{ 236{
243 struct channel_path *chp = to_channelpath(dev); 237 struct channel_path *chp = to_channelpath(dev);
238 int status;
244 239
245 if (!chp) 240 mutex_lock(&chp->lock);
246 return 0; 241 status = chp->state;
247 return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") : 242 mutex_unlock(&chp->lock);
248 sprintf(buf, "offline\n")); 243
244 return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
249} 245}
250 246
251static ssize_t chp_status_write(struct device *dev, 247static ssize_t chp_status_write(struct device *dev,
@@ -261,15 +257,18 @@ static ssize_t chp_status_write(struct device *dev,
261 if (!num_args) 257 if (!num_args)
262 return count; 258 return count;
263 259
264 if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) 260 if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
261 mutex_lock(&cp->lock);
265 error = s390_vary_chpid(cp->chpid, 1); 262 error = s390_vary_chpid(cp->chpid, 1);
266 else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) 263 mutex_unlock(&cp->lock);
264 } else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
265 mutex_lock(&cp->lock);
267 error = s390_vary_chpid(cp->chpid, 0); 266 error = s390_vary_chpid(cp->chpid, 0);
268 else 267 mutex_unlock(&cp->lock);
268 } else
269 error = -EINVAL; 269 error = -EINVAL;
270 270
271 return error < 0 ? error : count; 271 return error < 0 ? error : count;
272
273} 272}
274 273
275static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); 274static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
@@ -315,10 +314,12 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
315 char *buf) 314 char *buf)
316{ 315{
317 struct channel_path *chp = to_channelpath(dev); 316 struct channel_path *chp = to_channelpath(dev);
317 u8 type;
318 318
319 if (!chp) 319 mutex_lock(&chp->lock);
320 return 0; 320 type = chp->desc.desc;
321 return sprintf(buf, "%x\n", chp->desc.desc); 321 mutex_unlock(&chp->lock);
322 return sprintf(buf, "%x\n", type);
322} 323}
323 324
324static DEVICE_ATTR(type, 0444, chp_type_show, NULL); 325static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
@@ -395,6 +396,7 @@ int chp_new(struct chp_id chpid)
395 chp->state = 1; 396 chp->state = 1;
396 chp->dev.parent = &channel_subsystems[chpid.cssid]->device; 397 chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
397 chp->dev.release = chp_release; 398 chp->dev.release = chp_release;
399 mutex_init(&chp->lock);
398 400
399 /* Obtain channel path description and fill it in. */ 401 /* Obtain channel path description and fill it in. */
400 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); 402 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
@@ -464,7 +466,10 @@ void *chp_get_chp_desc(struct chp_id chpid)
464 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); 466 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
465 if (!desc) 467 if (!desc)
466 return NULL; 468 return NULL;
469
470 mutex_lock(&chp->lock);
467 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); 471 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
472 mutex_unlock(&chp->lock);
468 return desc; 473 return desc;
469} 474}
470 475
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 26c3d2246176..12b4903d6fe3 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/chp.h 2 * drivers/s390/cio/chp.h
3 * 3 *
4 * Copyright IBM Corp. 2007 4 * Copyright IBM Corp. 2007,2010
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */ 6 */
7 7
@@ -10,6 +10,7 @@
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/mutex.h>
13#include <asm/chpid.h> 14#include <asm/chpid.h>
14#include "chsc.h" 15#include "chsc.h"
15#include "css.h" 16#include "css.h"
@@ -40,16 +41,23 @@ static inline int chp_test_bit(u8 *bitmap, int num)
40 41
41 42
42struct channel_path { 43struct channel_path {
44 struct device dev;
43 struct chp_id chpid; 45 struct chp_id chpid;
46 struct mutex lock; /* Serialize access to below members. */
44 int state; 47 int state;
45 struct channel_path_desc desc; 48 struct channel_path_desc desc;
46 /* Channel-measurement related stuff: */ 49 /* Channel-measurement related stuff: */
47 int cmg; 50 int cmg;
48 int shared; 51 int shared;
49 void *cmg_chars; 52 void *cmg_chars;
50 struct device dev;
51}; 53};
52 54
55/* Return channel_path struct for given chpid. */
56static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
57{
58 return channel_subsystems[chpid.cssid]->chps[chpid.id];
59}
60
53int chp_get_status(struct chp_id chpid); 61int chp_get_status(struct chp_id chpid);
54u8 chp_get_sch_opm(struct subchannel *sch); 62u8 chp_get_sch_opm(struct subchannel *sch);
55int chp_is_registered(struct chp_id chpid); 63int chp_is_registered(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4cbb1a6ca33c..75c3f1f8fd43 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,7 +2,7 @@
2 * drivers/s390/cio/chsc.c 2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call 3 * S/390 common I/O routines -- channel subsystem call
4 * 4 *
5 * Copyright IBM Corp. 1999,2008 5 * Copyright IBM Corp. 1999,2010
6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -29,8 +29,8 @@
29#include "chsc.h" 29#include "chsc.h"
30 30
31static void *sei_page; 31static void *sei_page;
32static DEFINE_SPINLOCK(siosl_lock); 32static void *chsc_page;
33static DEFINE_SPINLOCK(sda_lock); 33static DEFINE_SPINLOCK(chsc_page_lock);
34 34
35/** 35/**
36 * chsc_error_from_response() - convert a chsc response to an error 36 * chsc_error_from_response() - convert a chsc response to an error
@@ -85,17 +85,15 @@ struct chsc_ssd_area {
85 85
86int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) 86int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
87{ 87{
88 unsigned long page;
89 struct chsc_ssd_area *ssd_area; 88 struct chsc_ssd_area *ssd_area;
90 int ccode; 89 int ccode;
91 int ret; 90 int ret;
92 int i; 91 int i;
93 int mask; 92 int mask;
94 93
95 page = get_zeroed_page(GFP_KERNEL | GFP_DMA); 94 spin_lock_irq(&chsc_page_lock);
96 if (!page) 95 memset(chsc_page, 0, PAGE_SIZE);
97 return -ENOMEM; 96 ssd_area = chsc_page;
98 ssd_area = (struct chsc_ssd_area *) page;
99 ssd_area->request.length = 0x0010; 97 ssd_area->request.length = 0x0010;
100 ssd_area->request.code = 0x0004; 98 ssd_area->request.code = 0x0004;
101 ssd_area->ssid = schid.ssid; 99 ssd_area->ssid = schid.ssid;
@@ -106,25 +104,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
106 /* Check response. */ 104 /* Check response. */
107 if (ccode > 0) { 105 if (ccode > 0) {
108 ret = (ccode == 3) ? -ENODEV : -EBUSY; 106 ret = (ccode == 3) ? -ENODEV : -EBUSY;
109 goto out_free; 107 goto out;
110 } 108 }
111 ret = chsc_error_from_response(ssd_area->response.code); 109 ret = chsc_error_from_response(ssd_area->response.code);
112 if (ret != 0) { 110 if (ret != 0) {
113 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", 111 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
114 schid.ssid, schid.sch_no, 112 schid.ssid, schid.sch_no,
115 ssd_area->response.code); 113 ssd_area->response.code);
116 goto out_free; 114 goto out;
117 } 115 }
118 if (!ssd_area->sch_valid) { 116 if (!ssd_area->sch_valid) {
119 ret = -ENODEV; 117 ret = -ENODEV;
120 goto out_free; 118 goto out;
121 } 119 }
122 /* Copy data */ 120 /* Copy data */
123 ret = 0; 121 ret = 0;
124 memset(ssd, 0, sizeof(struct chsc_ssd_info)); 122 memset(ssd, 0, sizeof(struct chsc_ssd_info));
125 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && 123 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
126 (ssd_area->st != SUBCHANNEL_TYPE_MSG)) 124 (ssd_area->st != SUBCHANNEL_TYPE_MSG))
127 goto out_free; 125 goto out;
128 ssd->path_mask = ssd_area->path_mask; 126 ssd->path_mask = ssd_area->path_mask;
129 ssd->fla_valid_mask = ssd_area->fla_valid_mask; 127 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
130 for (i = 0; i < 8; i++) { 128 for (i = 0; i < 8; i++) {
@@ -136,8 +134,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
136 if (ssd_area->fla_valid_mask & mask) 134 if (ssd_area->fla_valid_mask & mask)
137 ssd->fla[i] = ssd_area->fla[i]; 135 ssd->fla[i] = ssd_area->fla[i];
138 } 136 }
139out_free: 137out:
140 free_page(page); 138 spin_unlock_irq(&chsc_page_lock);
141 return ret; 139 return ret;
142} 140}
143 141
@@ -328,6 +326,36 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
328 s390_process_res_acc(&link); 326 s390_process_res_acc(&link);
329} 327}
330 328
329static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
330{
331 struct channel_path *chp;
332 struct chp_id chpid;
333 u8 *data;
334 int num;
335
336 CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
337 if (sei_area->rs != 0)
338 return;
339 data = sei_area->ccdf;
340 chp_id_init(&chpid);
341 for (num = 0; num <= __MAX_CHPID; num++) {
342 if (!chp_test_bit(data, num))
343 continue;
344 chpid.id = num;
345
346 CIO_CRW_EVENT(4, "Update information for channel path "
347 "%x.%02x\n", chpid.cssid, chpid.id);
348 chp = chpid_to_chp(chpid);
349 if (!chp) {
350 chp_new(chpid);
351 continue;
352 }
353 mutex_lock(&chp->lock);
354 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
355 mutex_unlock(&chp->lock);
356 }
357}
358
331struct chp_config_data { 359struct chp_config_data {
332 u8 map[32]; 360 u8 map[32];
333 u8 op; 361 u8 op;
@@ -378,9 +406,12 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
378 case 1: /* link incident*/ 406 case 1: /* link incident*/
379 chsc_process_sei_link_incident(sei_area); 407 chsc_process_sei_link_incident(sei_area);
380 break; 408 break;
381 case 2: /* i/o resource accessibiliy */ 409 case 2: /* i/o resource accessibility */
382 chsc_process_sei_res_acc(sei_area); 410 chsc_process_sei_res_acc(sei_area);
383 break; 411 break;
412 case 7: /* channel-path-availability information */
413 chsc_process_sei_chp_avail(sei_area);
414 break;
384 case 8: /* channel-path-configuration notification */ 415 case 8: /* channel-path-configuration notification */
385 chsc_process_sei_chp_config(sei_area); 416 chsc_process_sei_chp_config(sei_area);
386 break; 417 break;
@@ -497,6 +528,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
497 */ 528 */
498int chsc_chp_vary(struct chp_id chpid, int on) 529int chsc_chp_vary(struct chp_id chpid, int on)
499{ 530{
531 struct channel_path *chp = chpid_to_chp(chpid);
500 struct chp_link link; 532 struct chp_link link;
501 533
502 memset(&link, 0, sizeof(struct chp_link)); 534 memset(&link, 0, sizeof(struct chp_link));
@@ -506,11 +538,12 @@ int chsc_chp_vary(struct chp_id chpid, int on)
506 /* 538 /*
507 * Redo PathVerification on the devices the chpid connects to 539 * Redo PathVerification on the devices the chpid connects to
508 */ 540 */
509 541 if (on) {
510 if (on) 542 /* Try to update the channel path descritor. */
543 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
511 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 544 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
512 __s390_vary_chpid_on, &link); 545 __s390_vary_chpid_on, &link);
513 else 546 } else
514 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 547 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
515 NULL, &link); 548 NULL, &link);
516 549
@@ -552,7 +585,7 @@ cleanup:
552 return ret; 585 return ret;
553} 586}
554 587
555int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) 588int __chsc_do_secm(struct channel_subsystem *css, int enable)
556{ 589{
557 struct { 590 struct {
558 struct chsc_header request; 591 struct chsc_header request;
@@ -573,7 +606,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
573 } __attribute__ ((packed)) *secm_area; 606 } __attribute__ ((packed)) *secm_area;
574 int ret, ccode; 607 int ret, ccode;
575 608
576 secm_area = page; 609 spin_lock_irq(&chsc_page_lock);
610 memset(chsc_page, 0, PAGE_SIZE);
611 secm_area = chsc_page;
577 secm_area->request.length = 0x0050; 612 secm_area->request.length = 0x0050;
578 secm_area->request.code = 0x0016; 613 secm_area->request.code = 0x0016;
579 614
@@ -584,8 +619,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
584 secm_area->operation_code = enable ? 0 : 1; 619 secm_area->operation_code = enable ? 0 : 1;
585 620
586 ccode = chsc(secm_area); 621 ccode = chsc(secm_area);
587 if (ccode > 0) 622 if (ccode > 0) {
588 return (ccode == 3) ? -ENODEV : -EBUSY; 623 ret = (ccode == 3) ? -ENODEV : -EBUSY;
624 goto out;
625 }
589 626
590 switch (secm_area->response.code) { 627 switch (secm_area->response.code) {
591 case 0x0102: 628 case 0x0102:
@@ -598,37 +635,32 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
598 if (ret != 0) 635 if (ret != 0)
599 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", 636 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
600 secm_area->response.code); 637 secm_area->response.code);
638out:
639 spin_unlock_irq(&chsc_page_lock);
601 return ret; 640 return ret;
602} 641}
603 642
604int 643int
605chsc_secm(struct channel_subsystem *css, int enable) 644chsc_secm(struct channel_subsystem *css, int enable)
606{ 645{
607 void *secm_area;
608 int ret; 646 int ret;
609 647
610 secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
611 if (!secm_area)
612 return -ENOMEM;
613
614 if (enable && !css->cm_enabled) { 648 if (enable && !css->cm_enabled) {
615 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 649 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
616 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 650 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
617 if (!css->cub_addr1 || !css->cub_addr2) { 651 if (!css->cub_addr1 || !css->cub_addr2) {
618 free_page((unsigned long)css->cub_addr1); 652 free_page((unsigned long)css->cub_addr1);
619 free_page((unsigned long)css->cub_addr2); 653 free_page((unsigned long)css->cub_addr2);
620 free_page((unsigned long)secm_area);
621 return -ENOMEM; 654 return -ENOMEM;
622 } 655 }
623 } 656 }
624 ret = __chsc_do_secm(css, enable, secm_area); 657 ret = __chsc_do_secm(css, enable);
625 if (!ret) { 658 if (!ret) {
626 css->cm_enabled = enable; 659 css->cm_enabled = enable;
627 if (css->cm_enabled) { 660 if (css->cm_enabled) {
628 ret = chsc_add_cmg_attr(css); 661 ret = chsc_add_cmg_attr(css);
629 if (ret) { 662 if (ret) {
630 memset(secm_area, 0, PAGE_SIZE); 663 __chsc_do_secm(css, 0);
631 __chsc_do_secm(css, 0, secm_area);
632 css->cm_enabled = 0; 664 css->cm_enabled = 0;
633 } 665 }
634 } else 666 } else
@@ -638,44 +670,24 @@ chsc_secm(struct channel_subsystem *css, int enable)
638 free_page((unsigned long)css->cub_addr1); 670 free_page((unsigned long)css->cub_addr1);
639 free_page((unsigned long)css->cub_addr2); 671 free_page((unsigned long)css->cub_addr2);
640 } 672 }
641 free_page((unsigned long)secm_area);
642 return ret; 673 return ret;
643} 674}
644 675
645int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, 676int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
646 int c, int m, 677 int c, int m, void *page)
647 struct chsc_response_struct *resp)
648{ 678{
679 struct chsc_scpd *scpd_area;
649 int ccode, ret; 680 int ccode, ret;
650 681
651 struct {
652 struct chsc_header request;
653 u32 : 2;
654 u32 m : 1;
655 u32 c : 1;
656 u32 fmt : 4;
657 u32 cssid : 8;
658 u32 : 4;
659 u32 rfmt : 4;
660 u32 first_chpid : 8;
661 u32 : 24;
662 u32 last_chpid : 8;
663 u32 zeroes1;
664 struct chsc_header response;
665 u8 data[PAGE_SIZE - 20];
666 } __attribute__ ((packed)) *scpd_area;
667
668 if ((rfmt == 1) && !css_general_characteristics.fcs) 682 if ((rfmt == 1) && !css_general_characteristics.fcs)
669 return -EINVAL; 683 return -EINVAL;
670 if ((rfmt == 2) && !css_general_characteristics.cib) 684 if ((rfmt == 2) && !css_general_characteristics.cib)
671 return -EINVAL; 685 return -EINVAL;
672 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
673 if (!scpd_area)
674 return -ENOMEM;
675 686
687 memset(page, 0, PAGE_SIZE);
688 scpd_area = page;
676 scpd_area->request.length = 0x0010; 689 scpd_area->request.length = 0x0010;
677 scpd_area->request.code = 0x0002; 690 scpd_area->request.code = 0x0002;
678
679 scpd_area->cssid = chpid.cssid; 691 scpd_area->cssid = chpid.cssid;
680 scpd_area->first_chpid = chpid.id; 692 scpd_area->first_chpid = chpid.id;
681 scpd_area->last_chpid = chpid.id; 693 scpd_area->last_chpid = chpid.id;
@@ -685,20 +697,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
685 scpd_area->rfmt = rfmt; 697 scpd_area->rfmt = rfmt;
686 698
687 ccode = chsc(scpd_area); 699 ccode = chsc(scpd_area);
688 if (ccode > 0) { 700 if (ccode > 0)
689 ret = (ccode == 3) ? -ENODEV : -EBUSY; 701 return (ccode == 3) ? -ENODEV : -EBUSY;
690 goto out;
691 }
692 702
693 ret = chsc_error_from_response(scpd_area->response.code); 703 ret = chsc_error_from_response(scpd_area->response.code);
694 if (ret == 0) 704 if (ret)
695 /* Success. */
696 memcpy(resp, &scpd_area->response, scpd_area->response.length);
697 else
698 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", 705 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
699 scpd_area->response.code); 706 scpd_area->response.code);
700out:
701 free_page((unsigned long)scpd_area);
702 return ret; 707 return ret;
703} 708}
704EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); 709EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
@@ -707,17 +712,38 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
707 struct channel_path_desc *desc) 712 struct channel_path_desc *desc)
708{ 713{
709 struct chsc_response_struct *chsc_resp; 714 struct chsc_response_struct *chsc_resp;
715 struct chsc_scpd *scpd_area;
716 unsigned long flags;
710 int ret; 717 int ret;
711 718
712 chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL); 719 spin_lock_irqsave(&chsc_page_lock, flags);
713 if (!chsc_resp) 720 scpd_area = chsc_page;
714 return -ENOMEM; 721 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
715 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp); 722 if (ret)
723 goto out;
724 chsc_resp = (void *)&scpd_area->response;
725 memcpy(desc, &chsc_resp->data, sizeof(*desc));
726out:
727 spin_unlock_irqrestore(&chsc_page_lock, flags);
728 return ret;
729}
730
731int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
732 struct channel_path_desc_fmt1 *desc)
733{
734 struct chsc_response_struct *chsc_resp;
735 struct chsc_scpd *scpd_area;
736 int ret;
737
738 spin_lock_irq(&chsc_page_lock);
739 scpd_area = chsc_page;
740 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
716 if (ret) 741 if (ret)
717 goto out_free; 742 goto out;
743 chsc_resp = (void *)&scpd_area->response;
718 memcpy(desc, &chsc_resp->data, sizeof(*desc)); 744 memcpy(desc, &chsc_resp->data, sizeof(*desc));
719out_free: 745out:
720 kfree(chsc_resp); 746 spin_unlock_irq(&chsc_page_lock);
721 return ret; 747 return ret;
722} 748}
723 749
@@ -725,33 +751,22 @@ static void
725chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, 751chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
726 struct cmg_chars *chars) 752 struct cmg_chars *chars)
727{ 753{
728 switch (chp->cmg) { 754 struct cmg_chars *cmg_chars;
729 case 2: 755 int i, mask;
730 case 3: 756
731 chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), 757 cmg_chars = chp->cmg_chars;
732 GFP_KERNEL); 758 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
733 if (chp->cmg_chars) { 759 mask = 0x80 >> (i + 3);
734 int i, mask; 760 if (cmcv & mask)
735 struct cmg_chars *cmg_chars; 761 cmg_chars->values[i] = chars->values[i];
736 762 else
737 cmg_chars = chp->cmg_chars; 763 cmg_chars->values[i] = 0;
738 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
739 mask = 0x80 >> (i + 3);
740 if (cmcv & mask)
741 cmg_chars->values[i] = chars->values[i];
742 else
743 cmg_chars->values[i] = 0;
744 }
745 }
746 break;
747 default:
748 /* No cmg-dependent data. */
749 break;
750 } 764 }
751} 765}
752 766
753int chsc_get_channel_measurement_chars(struct channel_path *chp) 767int chsc_get_channel_measurement_chars(struct channel_path *chp)
754{ 768{
769 struct cmg_chars *cmg_chars;
755 int ccode, ret; 770 int ccode, ret;
756 771
757 struct { 772 struct {
@@ -775,13 +790,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
775 u32 data[NR_MEASUREMENT_CHARS]; 790 u32 data[NR_MEASUREMENT_CHARS];
776 } __attribute__ ((packed)) *scmc_area; 791 } __attribute__ ((packed)) *scmc_area;
777 792
778 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 793 chp->cmg_chars = NULL;
779 if (!scmc_area) 794 cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
795 if (!cmg_chars)
780 return -ENOMEM; 796 return -ENOMEM;
781 797
798 spin_lock_irq(&chsc_page_lock);
799 memset(chsc_page, 0, PAGE_SIZE);
800 scmc_area = chsc_page;
782 scmc_area->request.length = 0x0010; 801 scmc_area->request.length = 0x0010;
783 scmc_area->request.code = 0x0022; 802 scmc_area->request.code = 0x0022;
784
785 scmc_area->first_chpid = chp->chpid.id; 803 scmc_area->first_chpid = chp->chpid.id;
786 scmc_area->last_chpid = chp->chpid.id; 804 scmc_area->last_chpid = chp->chpid.id;
787 805
@@ -792,53 +810,65 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
792 } 810 }
793 811
794 ret = chsc_error_from_response(scmc_area->response.code); 812 ret = chsc_error_from_response(scmc_area->response.code);
795 if (ret == 0) { 813 if (ret) {
796 /* Success. */
797 if (!scmc_area->not_valid) {
798 chp->cmg = scmc_area->cmg;
799 chp->shared = scmc_area->shared;
800 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
801 (struct cmg_chars *)
802 &scmc_area->data);
803 } else {
804 chp->cmg = -1;
805 chp->shared = -1;
806 }
807 } else {
808 CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", 814 CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
809 scmc_area->response.code); 815 scmc_area->response.code);
816 goto out;
817 }
818 if (scmc_area->not_valid) {
819 chp->cmg = -1;
820 chp->shared = -1;
821 goto out;
810 } 822 }
823 chp->cmg = scmc_area->cmg;
824 chp->shared = scmc_area->shared;
825 if (chp->cmg != 2 && chp->cmg != 3) {
826 /* No cmg-dependent data. */
827 goto out;
828 }
829 chp->cmg_chars = cmg_chars;
830 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
831 (struct cmg_chars *) &scmc_area->data);
811out: 832out:
812 free_page((unsigned long)scmc_area); 833 spin_unlock_irq(&chsc_page_lock);
834 if (!chp->cmg_chars)
835 kfree(cmg_chars);
836
813 return ret; 837 return ret;
814} 838}
815 839
816int __init chsc_alloc_sei_area(void) 840int __init chsc_init(void)
817{ 841{
818 int ret; 842 int ret;
819 843
820 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 844 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
821 if (!sei_page) { 845 chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
822 CIO_MSG_EVENT(0, "Can't allocate page for processing of " 846 if (!sei_page || !chsc_page) {
823 "chsc machine checks!\n"); 847 ret = -ENOMEM;
824 return -ENOMEM; 848 goto out_err;
825 } 849 }
826 ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); 850 ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
827 if (ret) 851 if (ret)
828 kfree(sei_page); 852 goto out_err;
853 return ret;
854out_err:
855 free_page((unsigned long)chsc_page);
856 free_page((unsigned long)sei_page);
829 return ret; 857 return ret;
830} 858}
831 859
832void __init chsc_free_sei_area(void) 860void __init chsc_init_cleanup(void)
833{ 861{
834 crw_unregister_handler(CRW_RSC_CSS); 862 crw_unregister_handler(CRW_RSC_CSS);
835 kfree(sei_page); 863 free_page((unsigned long)chsc_page);
864 free_page((unsigned long)sei_page);
836} 865}
837 866
838int chsc_enable_facility(int operation_code) 867int chsc_enable_facility(int operation_code)
839{ 868{
869 unsigned long flags;
840 int ret; 870 int ret;
841 static struct { 871 struct {
842 struct chsc_header request; 872 struct chsc_header request;
843 u8 reserved1:4; 873 u8 reserved1:4;
844 u8 format:4; 874 u8 format:4;
@@ -851,32 +881,33 @@ int chsc_enable_facility(int operation_code)
851 u32 reserved5:4; 881 u32 reserved5:4;
852 u32 format2:4; 882 u32 format2:4;
853 u32 reserved6:24; 883 u32 reserved6:24;
854 } __attribute__ ((packed, aligned(4096))) sda_area; 884 } __attribute__ ((packed)) *sda_area;
855 885
856 spin_lock(&sda_lock); 886 spin_lock_irqsave(&chsc_page_lock, flags);
857 memset(&sda_area, 0, sizeof(sda_area)); 887 memset(chsc_page, 0, PAGE_SIZE);
858 sda_area.request.length = 0x0400; 888 sda_area = chsc_page;
859 sda_area.request.code = 0x0031; 889 sda_area->request.length = 0x0400;
860 sda_area.operation_code = operation_code; 890 sda_area->request.code = 0x0031;
891 sda_area->operation_code = operation_code;
861 892
862 ret = chsc(&sda_area); 893 ret = chsc(sda_area);
863 if (ret > 0) { 894 if (ret > 0) {
864 ret = (ret == 3) ? -ENODEV : -EBUSY; 895 ret = (ret == 3) ? -ENODEV : -EBUSY;
865 goto out; 896 goto out;
866 } 897 }
867 898
868 switch (sda_area.response.code) { 899 switch (sda_area->response.code) {
869 case 0x0101: 900 case 0x0101:
870 ret = -EOPNOTSUPP; 901 ret = -EOPNOTSUPP;
871 break; 902 break;
872 default: 903 default:
873 ret = chsc_error_from_response(sda_area.response.code); 904 ret = chsc_error_from_response(sda_area->response.code);
874 } 905 }
875 if (ret != 0) 906 if (ret != 0)
876 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", 907 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
877 operation_code, sda_area.response.code); 908 operation_code, sda_area->response.code);
878 out: 909out:
879 spin_unlock(&sda_lock); 910 spin_unlock_irqrestore(&chsc_page_lock, flags);
880 return ret; 911 return ret;
881} 912}
882 913
@@ -895,13 +926,12 @@ chsc_determine_css_characteristics(void)
895 struct chsc_header response; 926 struct chsc_header response;
896 u32 reserved4; 927 u32 reserved4;
897 u32 general_char[510]; 928 u32 general_char[510];
898 u32 chsc_char[518]; 929 u32 chsc_char[508];
899 } __attribute__ ((packed)) *scsc_area; 930 } __attribute__ ((packed)) *scsc_area;
900 931
901 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 932 spin_lock_irq(&chsc_page_lock);
902 if (!scsc_area) 933 memset(chsc_page, 0, PAGE_SIZE);
903 return -ENOMEM; 934 scsc_area = chsc_page;
904
905 scsc_area->request.length = 0x0010; 935 scsc_area->request.length = 0x0010;
906 scsc_area->request.code = 0x0010; 936 scsc_area->request.code = 0x0010;
907 937
@@ -921,7 +951,7 @@ chsc_determine_css_characteristics(void)
921 CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", 951 CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
922 scsc_area->response.code); 952 scsc_area->response.code);
923exit: 953exit:
924 free_page ((unsigned long) scsc_area); 954 spin_unlock_irq(&chsc_page_lock);
925 return result; 955 return result;
926} 956}
927 957
@@ -976,29 +1006,29 @@ int chsc_sstpi(void *page, void *result, size_t size)
976 return (rr->response.code == 0x0001) ? 0 : -EIO; 1006 return (rr->response.code == 0x0001) ? 0 : -EIO;
977} 1007}
978 1008
979static struct {
980 struct chsc_header request;
981 u32 word1;
982 struct subchannel_id sid;
983 u32 word3;
984 struct chsc_header response;
985 u32 word[11];
986} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));
987
988int chsc_siosl(struct subchannel_id schid) 1009int chsc_siosl(struct subchannel_id schid)
989{ 1010{
1011 struct {
1012 struct chsc_header request;
1013 u32 word1;
1014 struct subchannel_id sid;
1015 u32 word3;
1016 struct chsc_header response;
1017 u32 word[11];
1018 } __attribute__ ((packed)) *siosl_area;
990 unsigned long flags; 1019 unsigned long flags;
991 int ccode; 1020 int ccode;
992 int rc; 1021 int rc;
993 1022
994 spin_lock_irqsave(&siosl_lock, flags); 1023 spin_lock_irqsave(&chsc_page_lock, flags);
995 memset(&siosl_area, 0, sizeof(siosl_area)); 1024 memset(chsc_page, 0, PAGE_SIZE);
996 siosl_area.request.length = 0x0010; 1025 siosl_area = chsc_page;
997 siosl_area.request.code = 0x0046; 1026 siosl_area->request.length = 0x0010;
998 siosl_area.word1 = 0x80000000; 1027 siosl_area->request.code = 0x0046;
999 siosl_area.sid = schid; 1028 siosl_area->word1 = 0x80000000;
1029 siosl_area->sid = schid;
1000 1030
1001 ccode = chsc(&siosl_area); 1031 ccode = chsc(siosl_area);
1002 if (ccode > 0) { 1032 if (ccode > 0) {
1003 if (ccode == 3) 1033 if (ccode == 3)
1004 rc = -ENODEV; 1034 rc = -ENODEV;
@@ -1008,17 +1038,16 @@ int chsc_siosl(struct subchannel_id schid)
1008 schid.ssid, schid.sch_no, ccode); 1038 schid.ssid, schid.sch_no, ccode);
1009 goto out; 1039 goto out;
1010 } 1040 }
1011 rc = chsc_error_from_response(siosl_area.response.code); 1041 rc = chsc_error_from_response(siosl_area->response.code);
1012 if (rc) 1042 if (rc)
1013 CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", 1043 CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
1014 schid.ssid, schid.sch_no, 1044 schid.ssid, schid.sch_no,
1015 siosl_area.response.code); 1045 siosl_area->response.code);
1016 else 1046 else
1017 CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", 1047 CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
1018 schid.ssid, schid.sch_no); 1048 schid.ssid, schid.sch_no);
1019out: 1049out:
1020 spin_unlock_irqrestore(&siosl_lock, flags); 1050 spin_unlock_irqrestore(&chsc_page_lock, flags);
1021
1022 return rc; 1051 return rc;
1023} 1052}
1024EXPORT_SYMBOL_GPL(chsc_siosl); 1053EXPORT_SYMBOL_GPL(chsc_siosl);
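
The chsc.c conversion replaces per-call get_zeroed_page() allocations and the
separate siosl/sda locks with one statically allocated DMA page serialized by
chsc_page_lock: each caller clears the page under the lock, builds its
request block in place, and unlocks once the response has been consumed. A
minimal sketch of that shared-page discipline (example_* names are
illustrative):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static void *example_page;			/* one DMA page for all callers */
static DEFINE_SPINLOCK(example_page_lock);

static int __init example_setup(void)
{
	example_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	return example_page ? 0 : -ENOMEM;
}

static int example_issue_request(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_page_lock, flags);
	memset(example_page, 0, PAGE_SIZE);
	/* ... build the request block in example_page and issue it ... */
	spin_unlock_irqrestore(&example_page_lock, flags);

	return 0;
}

Presumably this trades some concurrency (callers serialize on the lock) for
allocation-free operation on paths where -ENOMEM would be awkward.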
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 5453013f094b..3f15b2aaeaea 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -35,6 +35,22 @@ struct channel_path_desc {
35 u8 chpp; 35 u8 chpp;
36} __attribute__ ((packed)); 36} __attribute__ ((packed));
37 37
38struct channel_path_desc_fmt1 {
39 u8 flags;
40 u8 lsn;
41 u8 desc;
42 u8 chpid;
43 u32:24;
44 u8 chpp;
45 u32 unused[3];
46 u16 mdc;
47 u16:13;
48 u8 r:1;
49 u8 s:1;
50 u8 f:1;
51 u32 zeros[2];
52} __attribute__ ((packed));
53
38struct channel_path; 54struct channel_path;
39 55
40struct css_chsc_char { 56struct css_chsc_char {
@@ -57,23 +73,43 @@ struct chsc_ssd_info {
57 struct chp_id chpid[8]; 73 struct chp_id chpid[8];
58 u16 fla[8]; 74 u16 fla[8];
59}; 75};
76
77struct chsc_scpd {
78 struct chsc_header request;
79 u32:2;
80 u32 m:1;
81 u32 c:1;
82 u32 fmt:4;
83 u32 cssid:8;
84 u32:4;
85 u32 rfmt:4;
86 u32 first_chpid:8;
87 u32:24;
88 u32 last_chpid:8;
89 u32 zeroes1;
90 struct chsc_header response;
91 u8 data[PAGE_SIZE - 20];
92} __attribute__ ((packed));
93
94
60extern int chsc_get_ssd_info(struct subchannel_id schid, 95extern int chsc_get_ssd_info(struct subchannel_id schid,
61 struct chsc_ssd_info *ssd); 96 struct chsc_ssd_info *ssd);
62extern int chsc_determine_css_characteristics(void); 97extern int chsc_determine_css_characteristics(void);
63extern int chsc_alloc_sei_area(void); 98extern int chsc_init(void);
64extern void chsc_free_sei_area(void); 99extern void chsc_init_cleanup(void);
65 100
66extern int chsc_enable_facility(int); 101extern int chsc_enable_facility(int);
67struct channel_subsystem; 102struct channel_subsystem;
68extern int chsc_secm(struct channel_subsystem *, int); 103extern int chsc_secm(struct channel_subsystem *, int);
69int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page); 104int __chsc_do_secm(struct channel_subsystem *css, int enable);
70 105
71int chsc_chp_vary(struct chp_id chpid, int on); 106int chsc_chp_vary(struct chp_id chpid, int on);
72int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, 107int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
73 int c, int m, 108 int c, int m, void *page);
74 struct chsc_response_struct *resp);
75int chsc_determine_base_channel_path_desc(struct chp_id chpid, 109int chsc_determine_base_channel_path_desc(struct chp_id chpid,
76 struct channel_path_desc *desc); 110 struct channel_path_desc *desc);
111int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
112 struct channel_path_desc_fmt1 *desc);
77void chsc_chp_online(struct chp_id chpid); 113void chsc_chp_online(struct chp_id chpid);
78void chsc_chp_offline(struct chp_id chpid); 114void chsc_chp_offline(struct chp_id chpid);
79int chsc_get_channel_measurement_chars(struct channel_path *chp); 115int chsc_get_channel_measurement_chars(struct channel_path *chp);
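With struct channel_path_desc_fmt1 and its accessor exported here, callers can read the measurement-related fields (f, r, mdc) directly. A hedged usage fragment; the CHPID value is illustrative and not taken from the patch:

	struct channel_path_desc_fmt1 desc;
	struct chp_id chpid;
	int rc;

	chp_id_init(&chpid);
	chpid.id = 0x40;		/* example CHPID, an assumption */
	rc = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
	if (!rc && desc.f)		/* f: the mdc field is valid */
		pr_info("chpid %02x: mdc=%u\n", chpid.id, desc.mdc);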
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index a83877c664a6..e950f1ad4dd1 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -50,7 +50,7 @@ MODULE_LICENSE("GPL");
50 50
51static void chsc_subchannel_irq(struct subchannel *sch) 51static void chsc_subchannel_irq(struct subchannel *sch)
52{ 52{
53 struct chsc_private *private = sch->private; 53 struct chsc_private *private = dev_get_drvdata(&sch->dev);
54 struct chsc_request *request = private->request; 54 struct chsc_request *request = private->request;
55 struct irb *irb = (struct irb *)&S390_lowcore.irb; 55 struct irb *irb = (struct irb *)&S390_lowcore.irb;
56 56
@@ -80,13 +80,14 @@ static int chsc_subchannel_probe(struct subchannel *sch)
80 private = kzalloc(sizeof(*private), GFP_KERNEL); 80 private = kzalloc(sizeof(*private), GFP_KERNEL);
81 if (!private) 81 if (!private)
82 return -ENOMEM; 82 return -ENOMEM;
83 dev_set_drvdata(&sch->dev, private);
83 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); 84 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
84 if (ret) { 85 if (ret) {
85 CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n", 86 CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
86 sch->schid.ssid, sch->schid.sch_no, ret); 87 sch->schid.ssid, sch->schid.sch_no, ret);
88 dev_set_drvdata(&sch->dev, NULL);
87 kfree(private); 89 kfree(private);
88 } else { 90 } else {
89 sch->private = private;
90 if (dev_get_uevent_suppress(&sch->dev)) { 91 if (dev_get_uevent_suppress(&sch->dev)) {
91 dev_set_uevent_suppress(&sch->dev, 0); 92 dev_set_uevent_suppress(&sch->dev, 0);
92 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); 93 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
@@ -100,8 +101,8 @@ static int chsc_subchannel_remove(struct subchannel *sch)
100 struct chsc_private *private; 101 struct chsc_private *private;
101 102
102 cio_disable_subchannel(sch); 103 cio_disable_subchannel(sch);
103 private = sch->private; 104 private = dev_get_drvdata(&sch->dev);
104 sch->private = NULL; 105 dev_set_drvdata(&sch->dev, NULL);
105 if (private->request) { 106 if (private->request) {
106 complete(&private->request->completion); 107 complete(&private->request->completion);
107 put_device(&sch->dev); 108 put_device(&sch->dev);
@@ -147,7 +148,10 @@ static struct css_device_id chsc_subchannel_ids[] = {
147MODULE_DEVICE_TABLE(css, chsc_subchannel_ids); 148MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
148 149
149static struct css_driver chsc_subchannel_driver = { 150static struct css_driver chsc_subchannel_driver = {
150 .owner = THIS_MODULE, 151 .drv = {
152 .owner = THIS_MODULE,
153 .name = "chsc_subchannel",
154 },
151 .subchannel_type = chsc_subchannel_ids, 155 .subchannel_type = chsc_subchannel_ids,
152 .irq = chsc_subchannel_irq, 156 .irq = chsc_subchannel_irq,
153 .probe = chsc_subchannel_probe, 157 .probe = chsc_subchannel_probe,
@@ -157,7 +161,6 @@ static struct css_driver chsc_subchannel_driver = {
157 .freeze = chsc_subchannel_freeze, 161 .freeze = chsc_subchannel_freeze,
158 .thaw = chsc_subchannel_restore, 162 .thaw = chsc_subchannel_restore,
159 .restore = chsc_subchannel_restore, 163 .restore = chsc_subchannel_restore,
160 .name = "chsc_subchannel",
161}; 164};
162 165
163static int __init chsc_init_dbfs(void) 166static int __init chsc_init_dbfs(void)
@@ -241,7 +244,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
241 chsc_area->header.key = PAGE_DEFAULT_KEY >> 4; 244 chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
242 while ((sch = chsc_get_next_subchannel(sch))) { 245 while ((sch = chsc_get_next_subchannel(sch))) {
243 spin_lock(sch->lock); 246 spin_lock(sch->lock);
244 private = sch->private; 247 private = dev_get_drvdata(&sch->dev);
245 if (private->request) { 248 if (private->request) {
246 spin_unlock(sch->lock); 249 spin_unlock(sch->lock);
247 ret = -EBUSY; 250 ret = -EBUSY;
@@ -688,25 +691,31 @@ out_free:
688 691
689static int chsc_ioctl_chpd(void __user *user_chpd) 692static int chsc_ioctl_chpd(void __user *user_chpd)
690{ 693{
694 struct chsc_scpd *scpd_area;
691 struct chsc_cpd_info *chpd; 695 struct chsc_cpd_info *chpd;
692 int ret; 696 int ret;
693 697
694 chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); 698 chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
695 if (!chpd) 699 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
696 return -ENOMEM; 700 if (!scpd_area || !chpd) {
701 ret = -ENOMEM;
702 goto out_free;
703 }
697 if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { 704 if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
698 ret = -EFAULT; 705 ret = -EFAULT;
699 goto out_free; 706 goto out_free;
700 } 707 }
701 ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, 708 ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
702 chpd->rfmt, chpd->c, chpd->m, 709 chpd->rfmt, chpd->c, chpd->m,
703 &chpd->chpdb); 710 scpd_area);
704 if (ret) 711 if (ret)
705 goto out_free; 712 goto out_free;
713 memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
706 if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) 714 if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
707 ret = -EFAULT; 715 ret = -EFAULT;
708out_free: 716out_free:
709 kfree(chpd); 717 kfree(chpd);
718 free_page((unsigned long)scpd_area);
710 return ret; 719 return ret;
711} 720}
712 721
@@ -806,6 +815,7 @@ static const struct file_operations chsc_fops = {
806 .open = nonseekable_open, 815 .open = nonseekable_open,
807 .unlocked_ioctl = chsc_ioctl, 816 .unlocked_ioctl = chsc_ioctl,
808 .compat_ioctl = chsc_ioctl, 817 .compat_ioctl = chsc_ioctl,
818 .llseek = no_llseek,
809}; 819};
810 820
811static struct miscdevice chsc_misc_device = { 821static struct miscdevice chsc_misc_device = {
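chsc_sch.c now keeps its per-subchannel state in the driver-core drvdata slot instead of the removed sch->private pointer. The invariant the probe/remove hunks establish, reduced to a sketch (example_ names are hypothetical, error paths trimmed):

	static int example_probe(struct subchannel *sch)
	{
		struct chsc_private *private;

		private = kzalloc(sizeof(*private), GFP_KERNEL);
		if (!private)
			return -ENOMEM;
		dev_set_drvdata(&sch->dev, private);	/* publish first */
		/* ...enable the subchannel; on failure clear drvdata
		 * again before kfree(), as the hunk above does... */
		return 0;
	}

	static int example_remove(struct subchannel *sch)
	{
		struct chsc_private *private = dev_get_drvdata(&sch->dev);

		dev_set_drvdata(&sch->dev, NULL);	/* unpublish before free */
		kfree(private);
		return 0;
	}

The ordering matters because the interrupt path (chsc_subchannel_irq) reads the same drvdata slot: it is set before the subchannel can raise interrupts and cleared before the memory goes away.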
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index f4e6cf3aceb8..cbde448f9947 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -84,29 +84,14 @@ out_unregister:
84 84
85arch_initcall (cio_debug_init); 85arch_initcall (cio_debug_init);
86 86
87int 87int cio_set_options(struct subchannel *sch, int flags)
88cio_set_options (struct subchannel *sch, int flags)
89{ 88{
90 sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; 89 struct io_subchannel_private *priv = to_io_private(sch);
91 sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
92 sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
93 return 0;
94}
95 90
96/* FIXME: who wants to use this? */ 91 priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
97int 92 priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
98cio_get_options (struct subchannel *sch) 93 priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
99{ 94 return 0;
100 int flags;
101
102 flags = 0;
103 if (sch->options.suspend)
104 flags |= DOIO_ALLOW_SUSPEND;
105 if (sch->options.prefetch)
106 flags |= DOIO_DENY_PREFETCH;
107 if (sch->options.inter)
108 flags |= DOIO_SUPPRESS_INTER;
109 return flags;
110} 95}
111 96
112static int 97static int
@@ -139,21 +124,21 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
139 __u8 lpm, /* logical path mask */ 124 __u8 lpm, /* logical path mask */
140 __u8 key) /* storage key */ 125 __u8 key) /* storage key */
141{ 126{
127 struct io_subchannel_private *priv = to_io_private(sch);
128 union orb *orb = &priv->orb;
142 int ccode; 129 int ccode;
143 union orb *orb;
144 130
145 CIO_TRACE_EVENT(5, "stIO"); 131 CIO_TRACE_EVENT(5, "stIO");
146 CIO_TRACE_EVENT(5, dev_name(&sch->dev)); 132 CIO_TRACE_EVENT(5, dev_name(&sch->dev));
147 133
148 orb = &to_io_private(sch)->orb;
149 memset(orb, 0, sizeof(union orb)); 134 memset(orb, 0, sizeof(union orb));
150 /* sch is always under 2G. */ 135 /* sch is always under 2G. */
151 orb->cmd.intparm = (u32)(addr_t)sch; 136 orb->cmd.intparm = (u32)(addr_t)sch;
152 orb->cmd.fmt = 1; 137 orb->cmd.fmt = 1;
153 138
154 orb->cmd.pfch = sch->options.prefetch == 0; 139 orb->cmd.pfch = priv->options.prefetch == 0;
155 orb->cmd.spnd = sch->options.suspend; 140 orb->cmd.spnd = priv->options.suspend;
156 orb->cmd.ssic = sch->options.suspend && sch->options.inter; 141 orb->cmd.ssic = priv->options.suspend && priv->options.inter;
157 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; 142 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
158#ifdef CONFIG_64BIT 143#ifdef CONFIG_64BIT
159 /* 144 /*
@@ -619,7 +604,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
619 s390_idle_check(regs, S390_lowcore.int_clock, 604 s390_idle_check(regs, S390_lowcore.int_clock,
620 S390_lowcore.async_enter_timer); 605 S390_lowcore.async_enter_timer);
621 irq_enter(); 606 irq_enter();
622 __get_cpu_var(s390_idle).nohz_delay = 1; 607 __this_cpu_write(s390_idle.nohz_delay, 1);
623 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 608 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
624 /* Serve timer interrupts first. */ 609 /* Serve timer interrupts first. */
625 clock_comparator_work(); 610 clock_comparator_work();
@@ -630,11 +615,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
630 irb = (struct irb *)&S390_lowcore.irb; 615 irb = (struct irb *)&S390_lowcore.irb;
631 do { 616 do {
632 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; 617 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
633 /* 618 if (tpi_info->adapter_IO) {
634 * Non I/O-subchannel thin interrupts are processed differently
635 */
636 if (tpi_info->adapter_IO == 1 &&
637 tpi_info->int_type == IO_INTERRUPT_TYPE) {
638 do_adapter_IO(tpi_info->isc); 619 do_adapter_IO(tpi_info->isc);
639 continue; 620 continue;
640 } 621 }
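The options bits that cio_set_options() manipulates now live in struct io_subchannel_private (see the io_sch.h hunks later in this diff), so only I/O subchannels carry them. A hedged sketch of the flag-to-ORB flow:

	/* Request suspend support plus suppressed intermediate interrupts. */
	cio_set_options(sch, DOIO_ALLOW_SUSPEND | DOIO_SUPPRESS_INTER);

	/* cio_start_key() later translates the bits when building the ORB:
	 *	orb->cmd.spnd = priv->options.suspend;
	 *	orb->cmd.ssic = priv->options.suspend && priv->options.inter;
	 */

The unused cio_get_options() reader is deleted outright rather than converted.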
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index bf7f80f5a330..155a82bcb9e5 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -84,13 +84,6 @@ struct subchannel {
84 SUBCHANNEL_TYPE_MSG = 2, 84 SUBCHANNEL_TYPE_MSG = 2,
85 SUBCHANNEL_TYPE_ADM = 3, 85 SUBCHANNEL_TYPE_ADM = 3,
86 } st; /* subchannel type */ 86 } st; /* subchannel type */
87
88 struct {
89 unsigned int suspend:1; /* allow suspend */
90 unsigned int prefetch:1;/* deny prefetch */
91 unsigned int inter:1; /* suppress intermediate interrupts */
92 } __attribute__ ((packed)) options;
93
94 __u8 vpm; /* verified path mask */ 87 __u8 vpm; /* verified path mask */
95 __u8 lpm; /* logical path mask */ 88 __u8 lpm; /* logical path mask */
96 __u8 opm; /* operational path mask */ 89 __u8 opm; /* operational path mask */
@@ -99,14 +92,11 @@ struct subchannel {
99 struct chsc_ssd_info ssd_info; /* subchannel description */ 92 struct chsc_ssd_info ssd_info; /* subchannel description */
100 struct device dev; /* entry in device tree */ 93 struct device dev; /* entry in device tree */
101 struct css_driver *driver; 94 struct css_driver *driver;
102 void *private; /* private per subchannel type data */
103 enum sch_todo todo; 95 enum sch_todo todo;
104 struct work_struct todo_work; 96 struct work_struct todo_work;
105 struct schib_config config; 97 struct schib_config config;
106} __attribute__ ((aligned(8))); 98} __attribute__ ((aligned(8)));
107 99
108#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
109
110#define to_subchannel(n) container_of(n, struct subchannel, dev) 100#define to_subchannel(n) container_of(n, struct subchannel, dev)
111 101
112extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); 102extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
@@ -120,7 +110,6 @@ extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
120extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); 110extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
121extern int cio_cancel (struct subchannel *); 111extern int cio_cancel (struct subchannel *);
122extern int cio_set_options (struct subchannel *, int); 112extern int cio_set_options (struct subchannel *, int);
123extern int cio_get_options (struct subchannel *);
124extern int cio_update_schib(struct subchannel *sch); 113extern int cio_update_schib(struct subchannel *sch);
125extern int cio_commit_config(struct subchannel *sch); 114extern int cio_commit_config(struct subchannel *sch);
126 115
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index ac94ac751459..c47b25fd3f43 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * driver for channel subsystem 2 * driver for channel subsystem
3 * 3 *
4 * Copyright IBM Corp. 2002, 2009 4 * Copyright IBM Corp. 2002, 2010
5 * 5 *
6 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -35,6 +35,7 @@ int css_init_done = 0;
35int max_ssid; 35int max_ssid;
36 36
37struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; 37struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
38static struct bus_type css_bus_type;
38 39
39int 40int
40for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 41for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -577,7 +578,7 @@ static int __unset_registered(struct device *dev, void *data)
577 return 0; 578 return 0;
578} 579}
579 580
580void css_schedule_eval_all_unreg(void) 581static void css_schedule_eval_all_unreg(void)
581{ 582{
582 unsigned long flags; 583 unsigned long flags;
583 struct idset *unreg_set; 584 struct idset *unreg_set;
@@ -618,6 +619,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
618static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 619static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
619{ 620{
620 struct subchannel_id mchk_schid; 621 struct subchannel_id mchk_schid;
622 struct subchannel *sch;
621 623
622 if (overflow) { 624 if (overflow) {
623 css_schedule_eval_all(); 625 css_schedule_eval_all();
@@ -635,8 +637,15 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
635 init_subchannel_id(&mchk_schid); 637 init_subchannel_id(&mchk_schid);
636 mchk_schid.sch_no = crw0->rsid; 638 mchk_schid.sch_no = crw0->rsid;
637 if (crw1) 639 if (crw1)
638 mchk_schid.ssid = (crw1->rsid >> 8) & 3; 640 mchk_schid.ssid = (crw1->rsid >> 4) & 3;
639 641
642 if (crw0->erc == CRW_ERC_PMOD) {
643 sch = get_subchannel_by_schid(mchk_schid);
644 if (sch) {
645 css_update_ssd_info(sch);
646 put_device(&sch->dev);
647 }
648 }
640 /* 649 /*
641 * Since we are always presented with IPI in the CRW, we have to 650 * Since we are always presented with IPI in the CRW, we have to
642 * use stsch() to find out if the subchannel in question has come 651 * use stsch() to find out if the subchannel in question has come
@@ -790,7 +799,6 @@ static struct notifier_block css_reboot_notifier = {
790static int css_power_event(struct notifier_block *this, unsigned long event, 799static int css_power_event(struct notifier_block *this, unsigned long event,
791 void *ptr) 800 void *ptr)
792{ 801{
793 void *secm_area;
794 int ret, i; 802 int ret, i;
795 803
796 switch (event) { 804 switch (event) {
@@ -806,15 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
806 mutex_unlock(&css->mutex); 814 mutex_unlock(&css->mutex);
807 continue; 815 continue;
808 } 816 }
809 secm_area = (void *)get_zeroed_page(GFP_KERNEL | 817 if (__chsc_do_secm(css, 0))
810 GFP_DMA);
811 if (secm_area) {
812 if (__chsc_do_secm(css, 0, secm_area))
813 ret = NOTIFY_BAD;
814 free_page((unsigned long)secm_area);
815 } else
816 ret = NOTIFY_BAD; 818 ret = NOTIFY_BAD;
817
818 mutex_unlock(&css->mutex); 819 mutex_unlock(&css->mutex);
819 } 820 }
820 break; 821 break;
@@ -830,15 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
830 mutex_unlock(&css->mutex); 831 mutex_unlock(&css->mutex);
831 continue; 832 continue;
832 } 833 }
833 secm_area = (void *)get_zeroed_page(GFP_KERNEL | 834 if (__chsc_do_secm(css, 1))
834 GFP_DMA);
835 if (secm_area) {
836 if (__chsc_do_secm(css, 1, secm_area))
837 ret = NOTIFY_BAD;
838 free_page((unsigned long)secm_area);
839 } else
840 ret = NOTIFY_BAD; 835 ret = NOTIFY_BAD;
841
842 mutex_unlock(&css->mutex); 836 mutex_unlock(&css->mutex);
843 } 837 }
844 /* search for subchannels, which appeared during hibernation */ 838 /* search for subchannels, which appeared during hibernation */
@@ -863,14 +857,11 @@ static int __init css_bus_init(void)
863{ 857{
864 int ret, i; 858 int ret, i;
865 859
866 ret = chsc_determine_css_characteristics(); 860 ret = chsc_init();
867 if (ret == -ENOMEM)
868 goto out;
869
870 ret = chsc_alloc_sei_area();
871 if (ret) 861 if (ret)
872 goto out; 862 return ret;
873 863
864 chsc_determine_css_characteristics();
874 /* Try to enable MSS. */ 865 /* Try to enable MSS. */
875 ret = chsc_enable_facility(CHSC_SDA_OC_MSS); 866 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
876 if (ret) 867 if (ret)
@@ -956,9 +947,9 @@ out_unregister:
956 } 947 }
957 bus_unregister(&css_bus_type); 948 bus_unregister(&css_bus_type);
958out: 949out:
959 crw_unregister_handler(CRW_RSC_CSS); 950 crw_unregister_handler(CRW_RSC_SCH);
960 chsc_free_sei_area();
961 idset_free(slow_subchannel_set); 951 idset_free(slow_subchannel_set);
952 chsc_init_cleanup();
962 pr_alert("The CSS device driver initialization failed with " 953 pr_alert("The CSS device driver initialization failed with "
963 "errno=%d\n", ret); 954 "errno=%d\n", ret);
964 return ret; 955 return ret;
@@ -978,9 +969,9 @@ static void __init css_bus_cleanup(void)
978 device_unregister(&css->device); 969 device_unregister(&css->device);
979 } 970 }
980 bus_unregister(&css_bus_type); 971 bus_unregister(&css_bus_type);
981 crw_unregister_handler(CRW_RSC_CSS); 972 crw_unregister_handler(CRW_RSC_SCH);
982 chsc_free_sei_area();
983 idset_free(slow_subchannel_set); 973 idset_free(slow_subchannel_set);
974 chsc_init_cleanup();
984 isc_unregister(IO_SCH_ISC); 975 isc_unregister(IO_SCH_ISC);
985} 976}
986 977
@@ -1048,7 +1039,16 @@ subsys_initcall_sync(channel_subsystem_init_sync);
1048 1039
1049void channel_subsystem_reinit(void) 1040void channel_subsystem_reinit(void)
1050{ 1041{
1042 struct channel_path *chp;
1043 struct chp_id chpid;
1044
1051 chsc_enable_facility(CHSC_SDA_OC_MSS); 1045 chsc_enable_facility(CHSC_SDA_OC_MSS);
1046 chp_id_for_each(&chpid) {
1047 chp = chpid_to_chp(chpid);
1048 if (!chp)
1049 continue;
1050 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
1051 }
1052} 1052}
1053 1053
1054#ifdef CONFIG_PROC_FS 1054#ifdef CONFIG_PROC_FS
@@ -1067,6 +1067,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1067static const struct file_operations cio_settle_proc_fops = { 1067static const struct file_operations cio_settle_proc_fops = {
1068 .open = nonseekable_open, 1068 .open = nonseekable_open,
1069 .write = cio_settle_write, 1069 .write = cio_settle_write,
1070 .llseek = no_llseek,
1070}; 1071};
1071 1072
1072static int __init cio_settle_init(void) 1073static int __init cio_settle_init(void)
@@ -1199,6 +1200,7 @@ static int css_pm_restore(struct device *dev)
1199 struct subchannel *sch = to_subchannel(dev); 1200 struct subchannel *sch = to_subchannel(dev);
1200 struct css_driver *drv; 1201 struct css_driver *drv;
1201 1202
1203 css_update_ssd_info(sch);
1202 if (!sch->dev.driver) 1204 if (!sch->dev.driver)
1203 return 0; 1205 return 0;
1204 drv = to_cssdriver(sch->dev.driver); 1206 drv = to_cssdriver(sch->dev.driver);
@@ -1213,7 +1215,7 @@ static const struct dev_pm_ops css_pm_ops = {
1213 .restore = css_pm_restore, 1215 .restore = css_pm_restore,
1214}; 1216};
1215 1217
1216struct bus_type css_bus_type = { 1218static struct bus_type css_bus_type = {
1217 .name = "css", 1219 .name = "css",
1218 .match = css_bus_match, 1220 .match = css_bus_match,
1219 .probe = css_probe, 1221 .probe = css_probe,
@@ -1232,9 +1234,7 @@ struct bus_type css_bus_type = {
1232 */ 1234 */
1233int css_driver_register(struct css_driver *cdrv) 1235int css_driver_register(struct css_driver *cdrv)
1234{ 1236{
1235 cdrv->drv.name = cdrv->name;
1236 cdrv->drv.bus = &css_bus_type; 1237 cdrv->drv.bus = &css_bus_type;
1237 cdrv->drv.owner = cdrv->owner;
1238 return driver_register(&cdrv->drv); 1238 return driver_register(&cdrv->drv);
1239} 1239}
1240EXPORT_SYMBOL_GPL(css_driver_register); 1240EXPORT_SYMBOL_GPL(css_driver_register);
@@ -1252,4 +1252,3 @@ void css_driver_unregister(struct css_driver *cdrv)
1252EXPORT_SYMBOL_GPL(css_driver_unregister); 1252EXPORT_SYMBOL_GPL(css_driver_unregister);
1253 1253
1254MODULE_LICENSE("GPL"); 1254MODULE_LICENSE("GPL");
1255EXPORT_SYMBOL(css_bus_type);
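css.c stops exporting css_bus_type and stops copying name/owner from struct css_driver into the embedded device_driver, so subchannel drivers now fill the embedded struct themselves, as the chsc_subchannel and io_subchannel hunks in this diff do. A hedged template (example_ identifiers are hypothetical):

	static struct css_driver example_driver = {
		.drv = {
			.owner = THIS_MODULE,
			.name  = "example_subchannel",
		},
		.subchannel_type = example_subchannel_ids,
	};

	/* css_driver_register() now only supplies drv.bus: */
	ret = css_driver_register(&example_driver);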
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 7e37886de231..80ebdddf7747 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -63,7 +63,6 @@ struct subchannel;
63struct chp_link; 63struct chp_link;
64/** 64/**
65 * struct css_driver - device driver for subchannels 65 * struct css_driver - device driver for subchannels
66 * @owner: owning module
67 * @subchannel_type: subchannel type supported by this driver 66 * @subchannel_type: subchannel type supported by this driver
68 * @drv: embedded device driver structure 67 * @drv: embedded device driver structure
69 * @irq: called on interrupts 68 * @irq: called on interrupts
@@ -78,10 +77,8 @@ struct chp_link;
78 * @thaw: undo work done in @freeze 77 * @thaw: undo work done in @freeze
79 * @restore: callback for restoring after hibernation 78 * @restore: callback for restoring after hibernation
80 * @settle: wait for asynchronous work to finish 79 * @settle: wait for asynchronous work to finish
81 * @name: name of the device driver
82 */ 80 */
83struct css_driver { 81struct css_driver {
84 struct module *owner;
85 struct css_device_id *subchannel_type; 82 struct css_device_id *subchannel_type;
86 struct device_driver drv; 83 struct device_driver drv;
87 void (*irq)(struct subchannel *); 84 void (*irq)(struct subchannel *);
@@ -96,16 +93,10 @@ struct css_driver {
96 int (*thaw) (struct subchannel *); 93 int (*thaw) (struct subchannel *);
97 int (*restore)(struct subchannel *); 94 int (*restore)(struct subchannel *);
98 int (*settle)(void); 95 int (*settle)(void);
99 const char *name;
100}; 96};
101 97
102#define to_cssdriver(n) container_of(n, struct css_driver, drv) 98#define to_cssdriver(n) container_of(n, struct css_driver, drv)
103 99
104/*
105 * all css_drivers have the css_bus_type
106 */
107extern struct bus_type css_bus_type;
108
109extern int css_driver_register(struct css_driver *); 100extern int css_driver_register(struct css_driver *);
110extern void css_driver_unregister(struct css_driver *); 101extern void css_driver_unregister(struct css_driver *);
111 102
@@ -140,7 +131,6 @@ struct channel_subsystem {
140}; 131};
141#define to_css(dev) container_of(dev, struct channel_subsystem, device) 132#define to_css(dev) container_of(dev, struct channel_subsystem, device)
142 133
143extern struct bus_type css_bus_type;
144extern struct channel_subsystem *channel_subsystems[]; 134extern struct channel_subsystem *channel_subsystems[];
145 135
146/* Helper functions to build lists for the slow path. */ 136/* Helper functions to build lists for the slow path. */
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 51bd3687d163..8e04c00cf0ad 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -127,7 +127,7 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
127 return ret; 127 return ret;
128} 128}
129 129
130struct bus_type ccw_bus_type; 130static struct bus_type ccw_bus_type;
131 131
132static void io_subchannel_irq(struct subchannel *); 132static void io_subchannel_irq(struct subchannel *);
133static int io_subchannel_probe(struct subchannel *); 133static int io_subchannel_probe(struct subchannel *);
@@ -172,9 +172,11 @@ static int io_subchannel_settle(void)
172} 172}
173 173
174static struct css_driver io_subchannel_driver = { 174static struct css_driver io_subchannel_driver = {
175 .owner = THIS_MODULE, 175 .drv = {
176 .owner = THIS_MODULE,
177 .name = "io_subchannel",
178 },
176 .subchannel_type = io_subchannel_ids, 179 .subchannel_type = io_subchannel_ids,
177 .name = "io_subchannel",
178 .irq = io_subchannel_irq, 180 .irq = io_subchannel_irq,
179 .sch_event = io_subchannel_sch_event, 181 .sch_event = io_subchannel_sch_event,
180 .chp_event = io_subchannel_chp_event, 182 .chp_event = io_subchannel_chp_event,
@@ -539,15 +541,24 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
539 int force, ret; 541 int force, ret;
540 unsigned long i; 542 unsigned long i;
541 543
542 if (!dev_fsm_final_state(cdev) && 544 /* Prevent conflict between multiple on-/offline processing requests. */
543 cdev->private->state != DEV_STATE_DISCONNECTED)
544 return -EAGAIN;
545 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) 545 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
546 return -EAGAIN; 546 return -EAGAIN;
547 /* Prevent conflict between internal I/Os and on-/offline processing. */
548 if (!dev_fsm_final_state(cdev) &&
549 cdev->private->state != DEV_STATE_DISCONNECTED) {
550 ret = -EAGAIN;
551 goto out_onoff;
552 }
553 /* Prevent conflict between pending work and on-/offline processing.*/
554 if (work_pending(&cdev->private->todo_work)) {
555 ret = -EAGAIN;
556 goto out_onoff;
557 }
547 558
548 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 559 if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
549 atomic_set(&cdev->private->onoff, 0); 560 ret = -EINVAL;
550 return -EINVAL; 561 goto out_onoff;
551 } 562 }
552 if (!strncmp(buf, "force\n", count)) { 563 if (!strncmp(buf, "force\n", count)) {
553 force = 1; 564 force = 1;
@@ -571,7 +582,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
571 } 582 }
572out: 583out:
573 if (cdev->drv) 584 if (cdev->drv)
574 module_put(cdev->drv->owner); 585 module_put(cdev->drv->driver.owner);
586out_onoff:
575 atomic_set(&cdev->private->onoff, 0); 587 atomic_set(&cdev->private->onoff, 0);
576 return (ret < 0) ? ret : count; 588 return (ret < 0) ? ret : count;
577} 589}
@@ -1030,6 +1042,7 @@ static void io_subchannel_init_fields(struct subchannel *sch)
1030 */ 1042 */
1031static int io_subchannel_probe(struct subchannel *sch) 1043static int io_subchannel_probe(struct subchannel *sch)
1032{ 1044{
1045 struct io_subchannel_private *io_priv;
1033 struct ccw_device *cdev; 1046 struct ccw_device *cdev;
1034 int rc; 1047 int rc;
1035 1048
@@ -1073,10 +1086,11 @@ static int io_subchannel_probe(struct subchannel *sch)
1073 if (rc) 1086 if (rc)
1074 goto out_schedule; 1087 goto out_schedule;
1075 /* Allocate I/O subchannel private data. */ 1088 /* Allocate I/O subchannel private data. */
1076 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1089 io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
1077 GFP_KERNEL | GFP_DMA); 1090 if (!io_priv)
1078 if (!sch->private)
1079 goto out_schedule; 1091 goto out_schedule;
1092
1093 set_io_private(sch, io_priv);
1080 css_schedule_eval(sch->schid); 1094 css_schedule_eval(sch->schid);
1081 return 0; 1095 return 0;
1082 1096
@@ -1090,6 +1104,7 @@ out_schedule:
1090static int 1104static int
1091io_subchannel_remove (struct subchannel *sch) 1105io_subchannel_remove (struct subchannel *sch)
1092{ 1106{
1107 struct io_subchannel_private *io_priv = to_io_private(sch);
1093 struct ccw_device *cdev; 1108 struct ccw_device *cdev;
1094 1109
1095 cdev = sch_get_cdev(sch); 1110 cdev = sch_get_cdev(sch);
@@ -1099,11 +1114,12 @@ io_subchannel_remove (struct subchannel *sch)
1099 /* Set ccw device to not operational and drop reference. */ 1114 /* Set ccw device to not operational and drop reference. */
1100 spin_lock_irq(cdev->ccwlock); 1115 spin_lock_irq(cdev->ccwlock);
1101 sch_set_cdev(sch, NULL); 1116 sch_set_cdev(sch, NULL);
1117 set_io_private(sch, NULL);
1102 cdev->private->state = DEV_STATE_NOT_OPER; 1118 cdev->private->state = DEV_STATE_NOT_OPER;
1103 spin_unlock_irq(cdev->ccwlock); 1119 spin_unlock_irq(cdev->ccwlock);
1104 ccw_device_unregister(cdev); 1120 ccw_device_unregister(cdev);
1105out_free: 1121out_free:
1106 kfree(sch->private); 1122 kfree(io_priv);
1107 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); 1123 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1108 return 0; 1124 return 0;
1109} 1125}
@@ -1147,6 +1163,7 @@ err:
1147static int io_subchannel_chp_event(struct subchannel *sch, 1163static int io_subchannel_chp_event(struct subchannel *sch,
1148 struct chp_link *link, int event) 1164 struct chp_link *link, int event)
1149{ 1165{
1166 struct ccw_device *cdev = sch_get_cdev(sch);
1150 int mask; 1167 int mask;
1151 1168
1152 mask = chp_ssd_get_mask(&sch->ssd_info, link); 1169 mask = chp_ssd_get_mask(&sch->ssd_info, link);
@@ -1156,22 +1173,30 @@ static int io_subchannel_chp_event(struct subchannel *sch,
1156 case CHP_VARY_OFF: 1173 case CHP_VARY_OFF:
1157 sch->opm &= ~mask; 1174 sch->opm &= ~mask;
1158 sch->lpm &= ~mask; 1175 sch->lpm &= ~mask;
1176 if (cdev)
1177 cdev->private->path_gone_mask |= mask;
1159 io_subchannel_terminate_path(sch, mask); 1178 io_subchannel_terminate_path(sch, mask);
1160 break; 1179 break;
1161 case CHP_VARY_ON: 1180 case CHP_VARY_ON:
1162 sch->opm |= mask; 1181 sch->opm |= mask;
1163 sch->lpm |= mask; 1182 sch->lpm |= mask;
1183 if (cdev)
1184 cdev->private->path_new_mask |= mask;
1164 io_subchannel_verify(sch); 1185 io_subchannel_verify(sch);
1165 break; 1186 break;
1166 case CHP_OFFLINE: 1187 case CHP_OFFLINE:
1167 if (cio_update_schib(sch)) 1188 if (cio_update_schib(sch))
1168 return -ENODEV; 1189 return -ENODEV;
1190 if (cdev)
1191 cdev->private->path_gone_mask |= mask;
1169 io_subchannel_terminate_path(sch, mask); 1192 io_subchannel_terminate_path(sch, mask);
1170 break; 1193 break;
1171 case CHP_ONLINE: 1194 case CHP_ONLINE:
1172 if (cio_update_schib(sch)) 1195 if (cio_update_schib(sch))
1173 return -ENODEV; 1196 return -ENODEV;
1174 sch->lpm |= mask & sch->opm; 1197 sch->lpm |= mask & sch->opm;
1198 if (cdev)
1199 cdev->private->path_new_mask |= mask;
1175 io_subchannel_verify(sch); 1200 io_subchannel_verify(sch);
1176 break; 1201 break;
1177 } 1202 }
@@ -1196,6 +1221,7 @@ static void io_subchannel_quiesce(struct subchannel *sch)
1196 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); 1221 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1197 while (ret == -EBUSY) { 1222 while (ret == -EBUSY) {
1198 cdev->private->state = DEV_STATE_QUIESCE; 1223 cdev->private->state = DEV_STATE_QUIESCE;
1224 cdev->private->iretry = 255;
1199 ret = ccw_device_cancel_halt_clear(cdev); 1225 ret = ccw_device_cancel_halt_clear(cdev);
1200 if (ret == -EBUSY) { 1226 if (ret == -EBUSY) {
1201 ccw_device_set_timeout(cdev, HZ/10); 1227 ccw_device_set_timeout(cdev, HZ/10);
@@ -1295,10 +1321,12 @@ static int purge_fn(struct device *dev, void *data)
1295 1321
1296 spin_lock_irq(cdev->ccwlock); 1322 spin_lock_irq(cdev->ccwlock);
1297 if (is_blacklisted(id->ssid, id->devno) && 1323 if (is_blacklisted(id->ssid, id->devno) &&
1298 (cdev->private->state == DEV_STATE_OFFLINE)) { 1324 (cdev->private->state == DEV_STATE_OFFLINE) &&
1325 (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
1299 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, 1326 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1300 id->devno); 1327 id->devno);
1301 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 1328 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1329 atomic_set(&cdev->private->onoff, 0);
1302 } 1330 }
1303 spin_unlock_irq(cdev->ccwlock); 1331 spin_unlock_irq(cdev->ccwlock);
1304 /* Abort loop in case of pending signal. */ 1332 /* Abort loop in case of pending signal. */
@@ -1445,7 +1473,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1445 break; 1473 break;
1446 case IO_SCH_UNREG_ATTACH: 1474 case IO_SCH_UNREG_ATTACH:
1447 case IO_SCH_UNREG: 1475 case IO_SCH_UNREG:
1448 if (cdev) 1476 if (!cdev)
1477 break;
1478 if (cdev->private->state == DEV_STATE_SENSE_ID) {
1479 /*
1480 * Note: delayed work triggered by this event
1481 * and repeated calls to sch_event are synchronized
1482 * by the above check for work_pending(cdev).
1483 */
1484 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1485 } else
1449 ccw_device_set_notoper(cdev); 1486 ccw_device_set_notoper(cdev);
1450 break; 1487 break;
1451 case IO_SCH_NOP: 1488 case IO_SCH_NOP:
@@ -1468,9 +1505,13 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1468 goto out; 1505 goto out;
1469 break; 1506 break;
1470 case IO_SCH_UNREG_ATTACH: 1507 case IO_SCH_UNREG_ATTACH:
1508 if (cdev->private->flags.resuming) {
1509 /* Device will be handled later. */
1510 rc = 0;
1511 goto out;
1512 }
1471 /* Unregister ccw device. */ 1513 /* Unregister ccw device. */
1472 if (!cdev->private->flags.resuming) 1514 ccw_device_unregister(cdev);
1473 ccw_device_unregister(cdev);
1474 break; 1515 break;
1475 default: 1516 default:
1476 break; 1517 break;
@@ -1530,11 +1571,12 @@ spinlock_t * cio_get_console_lock(void)
1530static int ccw_device_console_enable(struct ccw_device *cdev, 1571static int ccw_device_console_enable(struct ccw_device *cdev,
1531 struct subchannel *sch) 1572 struct subchannel *sch)
1532{ 1573{
1574 struct io_subchannel_private *io_priv = cio_get_console_priv();
1533 int rc; 1575 int rc;
1534 1576
1535 /* Attach subchannel private data. */ 1577 /* Attach subchannel private data. */
1536 sch->private = cio_get_console_priv(); 1578 memset(io_priv, 0, sizeof(*io_priv));
1537 memset(sch->private, 0, sizeof(struct io_subchannel_private)); 1579 set_io_private(sch, io_priv);
1538 io_subchannel_init_fields(sch); 1580 io_subchannel_init_fields(sch);
1539 rc = cio_commit_config(sch); 1581 rc = cio_commit_config(sch);
1540 if (rc) 1582 if (rc)
@@ -1812,6 +1854,7 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
1812 * available again. Kick re-detection. 1854 * available again. Kick re-detection.
1813 */ 1855 */
1814 cdev->private->flags.resuming = 1; 1856 cdev->private->flags.resuming = 1;
1857 cdev->private->path_new_mask = LPM_ANYPATH;
1815 css_schedule_eval(sch->schid); 1858 css_schedule_eval(sch->schid);
1816 spin_unlock_irq(sch->lock); 1859 spin_unlock_irq(sch->lock);
1817 css_complete_work(); 1860 css_complete_work();
@@ -1939,7 +1982,7 @@ static const struct dev_pm_ops ccw_pm_ops = {
1939 .restore = ccw_device_pm_restore, 1982 .restore = ccw_device_pm_restore,
1940}; 1983};
1941 1984
1942struct bus_type ccw_bus_type = { 1985static struct bus_type ccw_bus_type = {
1943 .name = "ccw", 1986 .name = "ccw",
1944 .match = ccw_bus_match, 1987 .match = ccw_bus_match,
1945 .uevent = ccw_uevent, 1988 .uevent = ccw_uevent,
@@ -1962,8 +2005,6 @@ int ccw_driver_register(struct ccw_driver *cdriver)
1962 struct device_driver *drv = &cdriver->driver; 2005 struct device_driver *drv = &cdriver->driver;
1963 2006
1964 drv->bus = &ccw_bus_type; 2007 drv->bus = &ccw_bus_type;
1965 drv->name = cdriver->name;
1966 drv->owner = cdriver->owner;
1967 2008
1968 return driver_register(drv); 2009 return driver_register(drv);
1969} 2010}
@@ -2081,5 +2122,4 @@ EXPORT_SYMBOL(ccw_device_set_offline);
2081EXPORT_SYMBOL(ccw_driver_register); 2122EXPORT_SYMBOL(ccw_driver_register);
2082EXPORT_SYMBOL(ccw_driver_unregister); 2123EXPORT_SYMBOL(ccw_driver_unregister);
2083EXPORT_SYMBOL(get_ccwdev_by_busid); 2124EXPORT_SYMBOL(get_ccwdev_by_busid);
2084EXPORT_SYMBOL(ccw_bus_type);
2085EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); 2125EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
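The reordered online_store() checks make the onoff atomic the single gate for on-/offline processing; the state and work_pending() tests now run only after the gate is held, closing the window in which internal I/O or pending todo work could race with a second request. The guard pattern, as a sketch:

	/* Claim exclusive on-/offline processing: only the 0 -> 1
	 * transition succeeds, every concurrent caller gets -EAGAIN. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* ...validate device state, do the actual work... */
	atomic_set(&cdev->private->onoff, 0);	/* release the gate */

purge_fn() above takes the same gate before scheduling an unregister.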
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 379de2d1ec49..7e297c7bb5ff 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -133,7 +133,6 @@ void ccw_device_set_notoper(struct ccw_device *cdev);
133/* qdio needs this. */ 133/* qdio needs this. */
134void ccw_device_set_timeout(struct ccw_device *, int); 134void ccw_device_set_timeout(struct ccw_device *, int);
135extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 135extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
136extern struct bus_type ccw_bus_type;
137 136
138/* Channel measurement facility related */ 137/* Channel measurement facility related */
139void retry_set_schib(struct ccw_device *cdev); 138void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c9b852647f01..52c233fa2b12 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -174,7 +174,10 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
174 ret = cio_clear (sch); 174 ret = cio_clear (sch);
175 return (ret == 0) ? -EBUSY : ret; 175 return (ret == 0) ? -EBUSY : ret;
176 } 176 }
177 panic("Can't stop i/o on subchannel.\n"); 177 /* Function was unsuccessful */
178 CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
179 cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
180 return -EIO;
178} 181}
179 182
180void ccw_device_update_sense_data(struct ccw_device *cdev) 183void ccw_device_update_sense_data(struct ccw_device *cdev)
@@ -315,7 +318,7 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
315 318
316/** 319/**
317 * ccw_device_notify() - inform the device's driver about an event 320 * ccw_device_notify() - inform the device's driver about an event
318 * @cdev: device for which an event occured 321 * @cdev: device for which an event occurred
319 * @event: event that occurred 322 * @event: event that occurred
320 * 323 *
321 * Returns: 324 * Returns:
@@ -349,9 +352,13 @@ out:
349 352
350static void ccw_device_oper_notify(struct ccw_device *cdev) 353static void ccw_device_oper_notify(struct ccw_device *cdev)
351{ 354{
355 struct subchannel *sch = to_subchannel(cdev->dev.parent);
356
352 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { 357 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
353 /* Reenable channel measurements, if needed. */ 358 /* Reenable channel measurements, if needed. */
354 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); 359 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
360 /* Save indication for new paths. */
361 cdev->private->path_new_mask = sch->vpm;
355 return; 362 return;
356 } 363 }
357 /* Driver doesn't want device back. */ 364 /* Driver doesn't want device back. */
@@ -401,9 +408,10 @@ ccw_device_done(struct ccw_device *cdev, int state)
401 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " 408 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
402 "%04x\n", cdev->private->dev_id.devno, 409 "%04x\n", cdev->private->dev_id.devno,
403 sch->schid.sch_no); 410 sch->schid.sch_no);
404 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) 411 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
412 cdev->private->state = DEV_STATE_NOT_OPER;
405 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 413 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
406 else 414 } else
407 ccw_device_set_disconnected(cdev); 415 ccw_device_set_disconnected(cdev);
408 cdev->private->flags.donotify = 0; 416 cdev->private->flags.donotify = 0;
409 break; 417 break;
@@ -462,6 +470,32 @@ static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
462 } 470 }
463} 471}
464 472
473static void ccw_device_report_path_events(struct ccw_device *cdev)
474{
475 struct subchannel *sch = to_subchannel(cdev->dev.parent);
476 int path_event[8];
477 int chp, mask;
478
479 for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
480 path_event[chp] = PE_NONE;
481 if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
482 path_event[chp] |= PE_PATH_GONE;
483 if (mask & cdev->private->path_new_mask & sch->vpm)
484 path_event[chp] |= PE_PATH_AVAILABLE;
485 if (mask & cdev->private->pgid_reset_mask & sch->vpm)
486 path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
487 }
488 if (cdev->online && cdev->drv->path_event)
489 cdev->drv->path_event(cdev, path_event);
490}
491
492static void ccw_device_reset_path_events(struct ccw_device *cdev)
493{
494 cdev->private->path_gone_mask = 0;
495 cdev->private->path_new_mask = 0;
496 cdev->private->pgid_reset_mask = 0;
497}
498
465void 499void
466ccw_device_verify_done(struct ccw_device *cdev, int err) 500ccw_device_verify_done(struct ccw_device *cdev, int err)
467{ 501{
@@ -498,6 +532,7 @@ callback:
498 &cdev->private->irb); 532 &cdev->private->irb);
499 memset(&cdev->private->irb, 0, sizeof(struct irb)); 533 memset(&cdev->private->irb, 0, sizeof(struct irb));
500 } 534 }
535 ccw_device_report_path_events(cdev);
501 break; 536 break;
502 case -ETIME: 537 case -ETIME:
503 case -EUSERS: 538 case -EUSERS:
@@ -516,6 +551,7 @@ callback:
516 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 551 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
517 break; 552 break;
518 } 553 }
554 ccw_device_reset_path_events(cdev);
519} 555}
520 556
521/* 557/*
@@ -653,7 +689,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
653 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { 689 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
654 /* 690 /*
655 * No final status yet or final status not yet delivered 691 * No final status yet or final status not yet delivered
656 * to the device driver. Can't do path verfication now, 692 * to the device driver. Can't do path verification now,
657 * delay until final status was delivered. 693 * delay until final status was delivered.
658 */ 694 */
659 cdev->private->flags.doverify = 1; 695 cdev->private->flags.doverify = 1;
@@ -734,13 +770,14 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
734 int ret; 770 int ret;
735 771
736 ccw_device_set_timeout(cdev, 0); 772 ccw_device_set_timeout(cdev, 0);
773 cdev->private->iretry = 255;
737 ret = ccw_device_cancel_halt_clear(cdev); 774 ret = ccw_device_cancel_halt_clear(cdev);
738 if (ret == -EBUSY) { 775 if (ret == -EBUSY) {
739 ccw_device_set_timeout(cdev, 3*HZ); 776 ccw_device_set_timeout(cdev, 3*HZ);
740 cdev->private->state = DEV_STATE_TIMEOUT_KILL; 777 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
741 return; 778 return;
742 } 779 }
743 if (ret == -ENODEV) 780 if (ret)
744 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 781 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
745 else if (cdev->handler) 782 else if (cdev->handler)
746 cdev->handler(cdev, cdev->private->intparm, 783 cdev->handler(cdev, cdev->private->intparm,
@@ -804,9 +841,6 @@ call_handler:
804static void 841static void
805ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) 842ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
806{ 843{
807 struct subchannel *sch;
808
809 sch = to_subchannel(cdev->dev.parent);
810 ccw_device_set_timeout(cdev, 0); 844 ccw_device_set_timeout(cdev, 0);
811 /* Start delayed path verification. */ 845 /* Start delayed path verification. */
812 ccw_device_online_verify(cdev, 0); 846 ccw_device_online_verify(cdev, 0);
@@ -837,6 +871,7 @@ void ccw_device_kill_io(struct ccw_device *cdev)
837{ 871{
838 int ret; 872 int ret;
839 873
874 cdev->private->iretry = 255;
840 ret = ccw_device_cancel_halt_clear(cdev); 875 ret = ccw_device_cancel_halt_clear(cdev);
841 if (ret == -EBUSY) { 876 if (ret == -EBUSY) {
842 ccw_device_set_timeout(cdev, 3*HZ); 877 ccw_device_set_timeout(cdev, 3*HZ);
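ccw_device_report_path_events() turns the three new masks into per-CHPID event words and hands them to the driver's path_event callback after path verification completes. A hedged consumer sketch (example_ names are hypothetical; the callback member is the one invoked in the hunk above):

	static void example_path_event(struct ccw_device *cdev, int *event)
	{
		int chp;

		for (chp = 0; chp < 8; chp++) {
			if (event[chp] & PE_PATH_GONE)
				dev_warn(&cdev->dev, "path %d gone\n", chp);
			if (event[chp] & PE_PATH_AVAILABLE)
				dev_info(&cdev->dev, "path %d back\n", chp);
			if (event[chp] & PE_PATHGROUP_ESTABLISHED)
				dev_info(&cdev->dev, "path %d regrouped\n", chp);
		}
	}

	static struct ccw_driver example_ccw_driver = {
		.path_event = example_path_event,
		/* ... */
	};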
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 6da84543dfe9..f98698d5735e 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -418,12 +418,9 @@ int ccw_device_resume(struct ccw_device *cdev)
418int 418int
419ccw_device_call_handler(struct ccw_device *cdev) 419ccw_device_call_handler(struct ccw_device *cdev)
420{ 420{
421 struct subchannel *sch;
422 unsigned int stctl; 421 unsigned int stctl;
423 int ending_status; 422 int ending_status;
424 423
425 sch = to_subchannel(cdev->dev.parent);
426
427 /* 424 /*
428 * we allow for the device action handler if . 425 * we allow for the device action handler if .
429 * - we received ending status 426 * - we received ending status
@@ -687,6 +684,46 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
687EXPORT_SYMBOL(ccw_device_tm_start_timeout); 684EXPORT_SYMBOL(ccw_device_tm_start_timeout);
688 685
689/** 686/**
687 * ccw_device_get_mdc - accumulate max data count
688 * @cdev: ccw device for which the max data count is accumulated
689 * @mask: mask of paths to use
690 *
 691 * Return the number of 64K-byte blocks that all paths at least support	692 * Return the number of 64K-byte blocks that all paths at least support
692 * for a transport command. Return values <= 0 indicate failures.
693 */
694int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
695{
696 struct subchannel *sch = to_subchannel(cdev->dev.parent);
697 struct channel_path_desc_fmt1 desc;
698 struct chp_id chpid;
699 int mdc = 0, ret, i;
700
 701	/* Adjust requested path mask to exclude varied-off paths. */
702 if (mask)
703 mask &= sch->lpm;
704 else
705 mask = sch->lpm;
706
707 chp_id_init(&chpid);
708 for (i = 0; i < 8; i++) {
709 if (!(mask & (0x80 >> i)))
710 continue;
711 chpid.id = sch->schib.pmcw.chpid[i];
712 ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
713 if (ret)
714 return ret;
715 if (!desc.f)
716 return 0;
717 if (!desc.r)
718 mdc = 1;
719 mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
720 }
721
722 return mdc;
723}
724EXPORT_SYMBOL(ccw_device_get_mdc);
725
726/**
690 * ccw_device_tm_intrg - perform interrogate function 727 * ccw_device_tm_intrg - perform interrogate function
691 * @cdev: ccw device on which to perform the interrogate function 728 * @cdev: ccw device on which to perform the interrogate function
692 * 729 *
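A hedged usage fragment for the new ccw_device_get_mdc(); how a caller converts the documented 64K-block unit into a byte limit is an assumption, not part of the patch:

	int mdc;
	size_t max_bytes;			/* assumed caller-side variable */

	mdc = ccw_device_get_mdc(cdev, 0);	/* 0: consider all usable paths */
	if (mdc > 0)
		max_bytes = (size_t)mdc * 64 * 1024;
	else
		max_bytes = EXAMPLE_FALLBACK;	/* hypothetical fallback limit */

Passing a nonzero mask restricts the scan to those paths, after the function intersects it with the subchannel's logical path mask.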
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 82a5ad0d63f6..07a4fd29f096 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -213,6 +213,17 @@ static void spid_start(struct ccw_device *cdev)
213 spid_do(cdev); 213 spid_do(cdev);
214} 214}
215 215
216static int pgid_is_reset(struct pgid *p)
217{
218 char *c;
219
220 for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
221 if (*c != 0)
222 return 0;
223 }
224 return 1;
225}
226
216static int pgid_cmp(struct pgid *p1, struct pgid *p2) 227static int pgid_cmp(struct pgid *p1, struct pgid *p2)
217{ 228{
218 return memcmp((char *) p1 + 1, (char *) p2 + 1, 229 return memcmp((char *) p1 + 1, (char *) p2 + 1,
@@ -223,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
223 * Determine pathgroup state from PGID data. 234 * Determine pathgroup state from PGID data.
224 */ 235 */
225static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, 236static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
226 int *mismatch, int *reserved, int *reset) 237 int *mismatch, int *reserved, u8 *reset)
227{ 238{
228 struct pgid *pgid = &cdev->private->pgid[0]; 239 struct pgid *pgid = &cdev->private->pgid[0];
229 struct pgid *first = NULL; 240 struct pgid *first = NULL;
@@ -238,9 +249,8 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
238 continue; 249 continue;
239 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) 250 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
240 *reserved = 1; 251 *reserved = 1;
241 if (pgid->inf.ps.state1 == SNID_STATE1_RESET) { 252 if (pgid_is_reset(pgid)) {
242 /* A PGID was reset. */ 253 *reset |= lpm;
243 *reset = 1;
244 continue; 254 continue;
245 } 255 }
246 if (!first) { 256 if (!first) {
@@ -307,7 +317,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
307 struct pgid *pgid; 317 struct pgid *pgid;
308 int mismatch = 0; 318 int mismatch = 0;
309 int reserved = 0; 319 int reserved = 0;
310 int reset = 0; 320 u8 reset = 0;
311 u8 donepm; 321 u8 donepm;
312 322
313 if (rc) 323 if (rc)
@@ -321,11 +331,12 @@ static void snid_done(struct ccw_device *cdev, int rc)
321 donepm = pgid_to_donepm(cdev); 331 donepm = pgid_to_donepm(cdev);
322 sch->vpm = donepm & sch->opm; 332 sch->vpm = donepm & sch->opm;
323 cdev->private->pgid_todo_mask &= ~donepm; 333 cdev->private->pgid_todo_mask &= ~donepm;
334 cdev->private->pgid_reset_mask |= reset;
324 pgid_fill(cdev, pgid); 335 pgid_fill(cdev, pgid);
325 } 336 }
326out: 337out:
327 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 338 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
328 "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid, 339 "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
329 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, 340 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
330 cdev->private->pgid_todo_mask, mismatch, reserved, reset); 341 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
331 switch (rc) { 342 switch (rc) {
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 469ef93f2302..ba31ad88f4f7 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -5,68 +5,36 @@
5#include <asm/schid.h> 5#include <asm/schid.h>
6#include <asm/ccwdev.h> 6#include <asm/ccwdev.h>
7#include "css.h" 7#include "css.h"
8 8#include "orb.h"
9/*
10 * command-mode operation request block
11 */
12struct cmd_orb {
13 u32 intparm; /* interruption parameter */
14 u32 key : 4; /* flags, like key, suspend control, etc. */
15 u32 spnd : 1; /* suspend control */
16 u32 res1 : 1; /* reserved */
17 u32 mod : 1; /* modification control */
18 u32 sync : 1; /* synchronize control */
19 u32 fmt : 1; /* format control */
20 u32 pfch : 1; /* prefetch control */
21 u32 isic : 1; /* initial-status-interruption control */
22 u32 alcc : 1; /* address-limit-checking control */
23 u32 ssic : 1; /* suppress-suspended-interr. control */
24 u32 res2 : 1; /* reserved */
25 u32 c64 : 1; /* IDAW/QDIO 64 bit control */
26 u32 i2k : 1; /* IDAW 2/4kB block size control */
27 u32 lpm : 8; /* logical path mask */
28 u32 ils : 1; /* incorrect length */
29 u32 zero : 6; /* reserved zeros */
30 u32 orbx : 1; /* ORB extension control */
31 u32 cpa; /* channel program address */
32} __attribute__ ((packed, aligned(4)));
33
34/*
35 * transport-mode operation request block
36 */
37struct tm_orb {
38 u32 intparm;
39 u32 key:4;
40 u32 :9;
41 u32 b:1;
42 u32 :2;
43 u32 lpm:8;
44 u32 :7;
45 u32 x:1;
46 u32 tcw;
47 u32 prio:8;
48 u32 :8;
49 u32 rsvpgm:8;
50 u32 :8;
51 u32 :32;
52 u32 :32;
53 u32 :32;
54 u32 :32;
55} __attribute__ ((packed, aligned(4)));
56
57union orb {
58 struct cmd_orb cmd;
59 struct tm_orb tm;
60} __attribute__ ((packed, aligned(4)));
61 9
62struct io_subchannel_private { 10struct io_subchannel_private {
63 union orb orb; /* operation request block */ 11 union orb orb; /* operation request block */
64 struct ccw1 sense_ccw; /* static ccw for sense command */ 12 struct ccw1 sense_ccw; /* static ccw for sense command */
65} __attribute__ ((aligned(8))); 13 struct ccw_device *cdev;/* pointer to the child ccw device */
14 struct {
15 unsigned int suspend:1; /* allow suspend */
16 unsigned int prefetch:1;/* deny prefetch */
17 unsigned int inter:1; /* suppress intermediate interrupts */
18 } __packed options;
19} __aligned(8);
66 20
67#define to_io_private(n) ((struct io_subchannel_private *)n->private) 21#define to_io_private(n) ((struct io_subchannel_private *) \
68#define sch_get_cdev(n) (dev_get_drvdata(&n->dev)) 22 dev_get_drvdata(&(n)->dev))
69#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c)) 23#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
24
25static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
26{
27 struct io_subchannel_private *priv = to_io_private(sch);
28 return priv ? priv->cdev : NULL;
29}
30
31static inline void sch_set_cdev(struct subchannel *sch,
32 struct ccw_device *cdev)
33{
34 struct io_subchannel_private *priv = to_io_private(sch);
35 if (priv)
36 priv->cdev = cdev;
37}
70 38
71#define MAX_CIWS 8 39#define MAX_CIWS 8
72 40
@@ -151,8 +119,11 @@ struct ccw_device_private {
151 struct subchannel_id schid; /* subchannel number */ 119 struct subchannel_id schid; /* subchannel number */
152 struct ccw_request req; /* internal I/O request */ 120 struct ccw_request req; /* internal I/O request */
153 int iretry; 121 int iretry;
154 u8 pgid_valid_mask; /* mask of valid PGIDs */ 122 u8 pgid_valid_mask; /* mask of valid PGIDs */
155 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ 123 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
124 u8 pgid_reset_mask; /* mask of PGIDs which were reset */
 125	u8 path_gone_mask;	/* mask of paths that became unavailable */
 126	u8 path_new_mask;	/* mask of paths that became available */
156 struct { 127 struct {
157 unsigned int fast:1; /* post with "channel end" */ 128 unsigned int fast:1; /* post with "channel end" */
158 unsigned int repall:1; /* report every interrupt status */ 129 unsigned int repall:1; /* report every interrupt status */
@@ -188,23 +159,6 @@ struct ccw_device_private {
188 void *cmb_wait; /* deferred cmb enable/disable */ 159 void *cmb_wait; /* deferred cmb enable/disable */
189}; 160};
190 161
191static inline int ssch(struct subchannel_id schid, union orb *addr)
192{
193 register struct subchannel_id reg1 asm("1") = schid;
194 int ccode = -EIO;
195
196 asm volatile(
197 " ssch 0(%2)\n"
198 "0: ipm %0\n"
199 " srl %0,28\n"
200 "1:\n"
201 EX_TABLE(0b, 1b)
202 : "+d" (ccode)
203 : "d" (reg1), "a" (addr), "m" (*addr)
204 : "cc", "memory");
205 return ccode;
206}
207
208static inline int rsch(struct subchannel_id schid) 162static inline int rsch(struct subchannel_id schid)
209{ 163{
210 register struct subchannel_id reg1 asm("1") = schid; 164 register struct subchannel_id reg1 asm("1") = schid;
@@ -220,21 +174,6 @@ static inline int rsch(struct subchannel_id schid)
220 return ccode; 174 return ccode;
221} 175}
222 176
223static inline int csch(struct subchannel_id schid)
224{
225 register struct subchannel_id reg1 asm("1") = schid;
226 int ccode;
227
228 asm volatile(
229 " csch\n"
230 " ipm %0\n"
231 " srl %0,28"
232 : "=d" (ccode)
233 : "d" (reg1)
234 : "cc");
235 return ccode;
236}
237
238static inline int hsch(struct subchannel_id schid) 177static inline int hsch(struct subchannel_id schid)
239{ 178{
240 register struct subchannel_id reg1 asm("1") = schid; 179 register struct subchannel_id reg1 asm("1") = schid;
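Because sch_get_cdev() now goes through the drvdata-backed private structure and tolerates a missing one, callers no longer need to know whether I/O private data has been attached yet. Sketch:

	struct ccw_device *cdev = sch_get_cdev(sch);

	if (!cdev)
		return;		/* no private data or no child device yet */
	/* safe to use cdev from here on */

This is what lets io_subchannel_chp_event() in the device.c hunks query the child device unconditionally and merely test the result for NULL.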
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index fac06155773f..4d80fc67a06b 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -3,6 +3,8 @@
3 3
4#include <asm/chpid.h> 4#include <asm/chpid.h>
5#include <asm/schid.h> 5#include <asm/schid.h>
6#include "orb.h"
7#include "cio.h"
6 8
7/* 9/*
8 * TPI info structure 10 * TPI info structure
@@ -87,6 +89,38 @@ static inline int tsch(struct subchannel_id schid, struct irb *addr)
87 return ccode; 89 return ccode;
88} 90}
89 91
92static inline int ssch(struct subchannel_id schid, union orb *addr)
93{
94 register struct subchannel_id reg1 asm("1") = schid;
95 int ccode = -EIO;
96
97 asm volatile(
98 " ssch 0(%2)\n"
99 "0: ipm %0\n"
100 " srl %0,28\n"
101 "1:\n"
102 EX_TABLE(0b, 1b)
103 : "+d" (ccode)
104 : "d" (reg1), "a" (addr), "m" (*addr)
105 : "cc", "memory");
106 return ccode;
107}
108
109static inline int csch(struct subchannel_id schid)
110{
111 register struct subchannel_id reg1 asm("1") = schid;
112 int ccode;
113
114 asm volatile(
115 " csch\n"
116 " ipm %0\n"
117 " srl %0,28"
118 : "=d" (ccode)
119 : "d" (reg1)
120 : "cc");
121 return ccode;
122}
123
90static inline int tpi(struct tpi_info *addr) 124static inline int tpi(struct tpi_info *addr)
91{ 125{
92 int ccode; 126 int ccode;
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index a0ae29564774..358ee16d10a2 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -93,6 +93,7 @@ EXPORT_SYMBOL(itcw_get_tcw);
93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) 93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
94{ 94{
95 size_t len; 95 size_t len;
96 int cross_count;
96 97
97 /* Main data. */ 98 /* Main data. */
98 len = sizeof(struct itcw); 99 len = sizeof(struct itcw);
@@ -105,12 +106,27 @@ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
105 /* TSB */ sizeof(struct tsb) + 106 /* TSB */ sizeof(struct tsb) +
106 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); 107 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
107 } 108 }
109
108 /* Maximum required alignment padding. */ 110 /* Maximum required alignment padding. */
109 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; 111 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
110 /* Maximum padding for structures that may not cross 4k boundary. */ 112
111 if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) 113 /* TIDAW lists may not cross a 4k boundary. To cross a
112 len += max(max_tidaws, intrg_max_tidaws) * 114 * boundary we need to add a TTIC TIDAW. We need to reserve
113 sizeof(struct tidaw) - 1; 115 * one additional TIDAW for a TTIC that we may need to add due
116 * to the placement of the data chunk in memory, and a further
117 * TIDAW for each page boundary that the TIDAW list may cross
 118 * due to its own size.
119 */
120 if (max_tidaws) {
121 cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
122 >> PAGE_SHIFT);
123 len += cross_count * sizeof(struct tidaw);
124 }
125 if (intrg_max_tidaws) {
126 cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
127 >> PAGE_SHIFT);
128 len += cross_count * sizeof(struct tidaw);
129 }
114 return len; 130 return len;
115} 131}
116EXPORT_SYMBOL(itcw_calc_size); 132EXPORT_SYMBOL(itcw_calc_size);
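
The reservation rule above reads as: one TTIC for the placement of the data chunk, plus one per page boundary the list itself can span. A self-contained sketch of that rule (illustrative helper, not part of the driver; assumes 4k pages and 16-byte tidaws):

    /* Number of TTIC tidaws reserved for a list of n_tidaws entries.
     * Example: n_tidaws = 300 with 16-byte tidaws and 4k pages gives
     * 1 + ((300 * 16 - 1) >> 12) = 2 reserved TTICs.
     */
    static size_t ttic_reserve(int n_tidaws)
    {
        if (!n_tidaws)
            return 0;
        return 1 + ((n_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT);
    }
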
@@ -165,6 +181,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
165 void *chunk; 181 void *chunk;
166 addr_t start; 182 addr_t start;
167 addr_t end; 183 addr_t end;
184 int cross_count;
168 185
169 /* Check for 2G limit. */ 186 /* Check for 2G limit. */
170 start = (addr_t) buffer; 187 start = (addr_t) buffer;
@@ -177,8 +194,17 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
177 if (IS_ERR(chunk)) 194 if (IS_ERR(chunk))
178 return chunk; 195 return chunk;
179 itcw = chunk; 196 itcw = chunk;
180 itcw->max_tidaws = max_tidaws; 197 /* allow for TTIC tidaws that may be needed to cross a page boundary */
181 itcw->intrg_max_tidaws = intrg_max_tidaws; 198 cross_count = 0;
199 if (max_tidaws)
200 cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
201 >> PAGE_SHIFT);
202 itcw->max_tidaws = max_tidaws + cross_count;
203 cross_count = 0;
204 if (intrg_max_tidaws)
205 cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
206 >> PAGE_SHIFT);
207 itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
182 /* Main TCW. */ 208 /* Main TCW. */
183 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); 209 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
184 if (IS_ERR(chunk)) 210 if (IS_ERR(chunk))
@@ -198,7 +224,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
198 /* Data TIDAL. */ 224 /* Data TIDAL. */
199 if (max_tidaws > 0) { 225 if (max_tidaws > 0) {
200 chunk = fit_chunk(&start, end, sizeof(struct tidaw) * 226 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
201 max_tidaws, 16, 1); 227 itcw->max_tidaws, 16, 0);
202 if (IS_ERR(chunk)) 228 if (IS_ERR(chunk))
203 return chunk; 229 return chunk;
204 tcw_set_data(itcw->tcw, chunk, 1); 230 tcw_set_data(itcw->tcw, chunk, 1);
@@ -206,7 +232,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
206 /* Interrogate data TIDAL. */ 232 /* Interrogate data TIDAL. */
207 if (intrg && (intrg_max_tidaws > 0)) { 233 if (intrg && (intrg_max_tidaws > 0)) {
208 chunk = fit_chunk(&start, end, sizeof(struct tidaw) * 234 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
209 intrg_max_tidaws, 16, 1); 235 itcw->intrg_max_tidaws, 16, 0);
210 if (IS_ERR(chunk)) 236 if (IS_ERR(chunk))
211 return chunk; 237 return chunk;
212 tcw_set_data(itcw->intrg_tcw, chunk, 1); 238 tcw_set_data(itcw->intrg_tcw, chunk, 1);
@@ -283,13 +309,29 @@ EXPORT_SYMBOL(itcw_add_dcw);
283 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the 309 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
284 * available space. 310 * available space.
285 * 311 *
286 * Note: the tidaw-list is assumed to be contiguous with no ttics. The 312 * Note: TTIC tidaws are automatically added when needed, so explicitly calling
287 * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. 313 * this interface with the TTIC flag is not supported. The last-tidaw flag
314 * for the last tidaw in the list will be set by itcw_finalize.
288 */ 315 */
289struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) 316struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
290{ 317{
318 struct tidaw *following;
319
291 if (itcw->num_tidaws >= itcw->max_tidaws) 320 if (itcw->num_tidaws >= itcw->max_tidaws)
292 return ERR_PTR(-ENOSPC); 321 return ERR_PTR(-ENOSPC);
322 /*
323 * Is the tidaw, which follows the one we are about to fill, on the next
 324 * page? Then we have to insert a TTIC tidaw first that points to the
325 * tidaw on the new page.
326 */
327 following = ((struct tidaw *) tcw_get_data(itcw->tcw))
328 + itcw->num_tidaws + 1;
329 if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
330 tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
331 TIDAW_FLAGS_TTIC, following, 0);
332 if (itcw->num_tidaws >= itcw->max_tidaws)
333 return ERR_PTR(-ENOSPC);
334 }
293 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); 335 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
294} 336}
295EXPORT_SYMBOL(itcw_add_tidaw); 337EXPORT_SYMBOL(itcw_add_tidaw);
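
With TTIC insertion handled internally, callers simply add data tidaws in order. A hedged usage sketch (allocation and error handling simplified; data1/data2 and the lengths are assumed):

    size_t size = itcw_calc_size(0, 2, 0);
    void *buf = kzalloc(size, GFP_KERNEL | GFP_DMA);
    struct itcw *itcw = itcw_init(buf, size, ITCW_OP_READ, 0, 2, 0);

    if (!IS_ERR(itcw)) {
        itcw_add_tidaw(itcw, 0, data1, len1);  /* may insert a TTIC */
        itcw_add_tidaw(itcw, 0, data2, len2);
        itcw_finalize(itcw);
    }
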
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 000000000000..45a9865c2b36
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,67 @@
1/*
2 * Orb related data structures.
3 *
4 * Copyright IBM Corp. 2007, 2011
5 *
6 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 * Sebastian Ott <sebott@linux.vnet.ibm.com>
9 */
10
11#ifndef S390_ORB_H
12#define S390_ORB_H
13
14/*
15 * Command-mode operation request block
16 */
17struct cmd_orb {
18 u32 intparm; /* interruption parameter */
19 u32 key:4; /* flags, like key, suspend control, etc. */
20 u32 spnd:1; /* suspend control */
21 u32 res1:1; /* reserved */
22 u32 mod:1; /* modification control */
23 u32 sync:1; /* synchronize control */
24 u32 fmt:1; /* format control */
25 u32 pfch:1; /* prefetch control */
26 u32 isic:1; /* initial-status-interruption control */
27 u32 alcc:1; /* address-limit-checking control */
28 u32 ssic:1; /* suppress-suspended-interr. control */
29 u32 res2:1; /* reserved */
30 u32 c64:1; /* IDAW/QDIO 64 bit control */
31 u32 i2k:1; /* IDAW 2/4kB block size control */
32 u32 lpm:8; /* logical path mask */
33 u32 ils:1; /* incorrect length */
34 u32 zero:6; /* reserved zeros */
35 u32 orbx:1; /* ORB extension control */
36 u32 cpa; /* channel program address */
37} __packed __aligned(4);
38
39/*
40 * Transport-mode operation request block
41 */
42struct tm_orb {
43 u32 intparm;
44 u32 key:4;
45 u32:9;
46 u32 b:1;
47 u32:2;
48 u32 lpm:8;
49 u32:7;
50 u32 x:1;
51 u32 tcw;
52 u32 prio:8;
53 u32:8;
54 u32 rsvpgm:8;
55 u32:8;
56 u32:32;
57 u32:32;
58 u32:32;
59 u32:32;
60} __packed __aligned(4);
61
62union orb {
63 struct cmd_orb cmd;
64 struct tm_orb tm;
65} __packed __aligned(4);
66
67#endif /* S390_ORB_H */
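
For orientation, a hedged sketch of filling a command-mode ORB from the new header before issuing ssch() (sch and ccw are assumed to exist; field values are illustrative only):

    union orb orb;

    memset(&orb, 0, sizeof(orb));
    orb.cmd.intparm = (u32) (addr_t) sch;   /* interruption parameter */
    orb.cmd.fmt = 1;                        /* format-1 CCWs */
    orb.cmd.lpm = sch->lpm;                 /* logical path mask */
    orb.cmd.cpa = (u32) (addr_t) ccw;       /* channel program address */
    rc = ssch(sch->schid, &orb);
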
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f0037eefd44e..7bc643f3f5ab 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -91,6 +91,12 @@ enum qdio_irq_states {
91#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ 91#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
92#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ 92#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
93 93
94/* SIGA flags */
95#define QDIO_SIGA_WRITE 0x00
96#define QDIO_SIGA_READ 0x01
97#define QDIO_SIGA_SYNC 0x02
98#define QDIO_SIGA_QEBSM_FLAG 0x80
99
94#ifdef CONFIG_64BIT 100#ifdef CONFIG_64BIT
95static inline int do_sqbs(u64 token, unsigned char state, int queue, 101static inline int do_sqbs(u64 token, unsigned char state, int queue,
96 int *start, int *count) 102 int *start, int *count)
@@ -142,10 +148,9 @@ struct siga_flag {
142 u8 input:1; 148 u8 input:1;
143 u8 output:1; 149 u8 output:1;
144 u8 sync:1; 150 u8 sync:1;
145 u8 no_sync_ti:1; 151 u8 sync_after_ai:1;
146 u8 no_sync_out_ti:1; 152 u8 sync_out_after_pci:1;
147 u8 no_sync_out_pci:1; 153 u8:3;
148 u8:2;
149} __attribute__ ((packed)); 154} __attribute__ ((packed));
150 155
151struct chsc_ssqd_area { 156struct chsc_ssqd_area {
@@ -202,12 +207,14 @@ struct qdio_dev_perf_stat {
202 unsigned int inbound_queue_full; 207 unsigned int inbound_queue_full;
203 unsigned int outbound_call; 208 unsigned int outbound_call;
204 unsigned int outbound_handler; 209 unsigned int outbound_handler;
210 unsigned int outbound_queue_full;
205 unsigned int fast_requeue; 211 unsigned int fast_requeue;
206 unsigned int target_full; 212 unsigned int target_full;
207 unsigned int eqbs; 213 unsigned int eqbs;
208 unsigned int eqbs_partial; 214 unsigned int eqbs_partial;
209 unsigned int sqbs; 215 unsigned int sqbs;
210 unsigned int sqbs_partial; 216 unsigned int sqbs_partial;
217 unsigned int int_discarded;
211} ____cacheline_aligned; 218} ____cacheline_aligned;
212 219
213struct qdio_queue_perf_stat { 220struct qdio_queue_perf_stat {
@@ -222,6 +229,10 @@ struct qdio_queue_perf_stat {
222 unsigned int nr_sbal_total; 229 unsigned int nr_sbal_total;
223}; 230};
224 231
232enum qdio_queue_irq_states {
233 QDIO_QUEUE_IRQS_DISABLED,
234};
235
225struct qdio_input_q { 236struct qdio_input_q {
226 /* input buffer acknowledgement flag */ 237 /* input buffer acknowledgement flag */
227 int polling; 238 int polling;
@@ -231,15 +242,19 @@ struct qdio_input_q {
231 int ack_count; 242 int ack_count;
232 /* last time of noticing incoming data */ 243 /* last time of noticing incoming data */
233 u64 timestamp; 244 u64 timestamp;
245 /* upper-layer polling flag */
246 unsigned long queue_irq_state;
247 /* callback to start upper-layer polling */
248 void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
234}; 249};
235 250
236struct qdio_output_q { 251struct qdio_output_q {
237 /* PCIs are enabled for the queue */ 252 /* PCIs are enabled for the queue */
238 int pci_out_enabled; 253 int pci_out_enabled;
239 /* IQDIO: output multiple buffers (enhanced SIGA) */
240 int use_enh_siga;
241 /* timer to check for more outbound work */ 254 /* timer to check for more outbound work */
242 struct timer_list timer; 255 struct timer_list timer;
256 /* used SBALs before tasklet schedule */
257 int scan_threshold;
243}; 258};
244 259
245/* 260/*
@@ -374,12 +389,13 @@ static inline int multicast_outbound(struct qdio_q *q)
374 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) 389 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
375#define is_qebsm(q) (q->irq_ptr->sch_token != 0) 390#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
376 391
377#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
378#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
379#define need_siga_in(q) (q->irq_ptr->siga_flag.input) 392#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
380#define need_siga_out(q) (q->irq_ptr->siga_flag.output) 393#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
381#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync) 394#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
382#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci) 395#define need_siga_sync_after_ai(q) \
396 (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
397#define need_siga_sync_out_after_pci(q) \
398 (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
383 399
384#define for_each_input_queue(irq_ptr, q, i) \ 400#define for_each_input_queue(irq_ptr, q, i) \
385 for (i = 0, q = irq_ptr->input_qs[0]; \ 401 for (i = 0, q = irq_ptr->input_qs[0]; \
@@ -399,6 +415,26 @@ static inline int multicast_outbound(struct qdio_q *q)
399#define sub_buf(bufnr, dec) \ 415#define sub_buf(bufnr, dec) \
400 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) 416 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
401 417
418#define queue_irqs_enabled(q) \
419 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
420#define queue_irqs_disabled(q) \
421 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
422
423#define TIQDIO_SHARED_IND 63
424
425/* device state change indicators */
426struct indicator_t {
427 u32 ind; /* u32 because of compare-and-swap performance */
428 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
429};
430
431extern struct indicator_t *q_indicators;
432
433static inline int shared_ind(u32 *dsci)
434{
435 return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
436}
437
402/* prototypes for thin interrupt */ 438/* prototypes for thin interrupt */
403void qdio_setup_thinint(struct qdio_irq *irq_ptr); 439void qdio_setup_thinint(struct qdio_irq *irq_ptr);
404int qdio_establish_thinint(struct qdio_irq *irq_ptr); 440int qdio_establish_thinint(struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 6ce83f56d537..f8b03a636e49 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v)
56 56
57 seq_printf(m, "DSCI: %d nr_used: %d\n", 57 seq_printf(m, "DSCI: %d nr_used: %d\n",
58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); 58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
59 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); 59 seq_printf(m, "ftc: %d last_move: %d\n",
60 seq_printf(m, "polling: %d ack start: %d ack count: %d\n", 60 q->first_to_check, q->last_move);
61 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); 61 if (q->is_input_q) {
62 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
63 q->u.in.polling, q->u.in.ack_start,
64 q->u.in.ack_count);
65 seq_printf(m, "IRQs disabled: %u\n",
66 test_bit(QDIO_QUEUE_IRQS_DISABLED,
67 &q->u.in.queue_irq_state));
68 }
62 seq_printf(m, "SBAL states:\n"); 69 seq_printf(m, "SBAL states:\n");
63 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 70 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
64 71
@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v)
113 return 0; 120 return 0;
114} 121}
115 122
116static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
117 size_t count, loff_t *off)
118{
119 struct seq_file *seq = file->private_data;
120 struct qdio_q *q = seq->private;
121
122 if (!q)
123 return 0;
124 if (q->is_input_q)
125 xchg(q->irq_ptr->dsci, 1);
126 local_bh_disable();
127 tasklet_schedule(&q->tasklet);
128 local_bh_enable();
129 return count;
130}
131
132static int qstat_seq_open(struct inode *inode, struct file *filp) 123static int qstat_seq_open(struct inode *inode, struct file *filp)
133{ 124{
134 return single_open(filp, qstat_show, 125 return single_open(filp, qstat_show,
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = {
139 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
140 .open = qstat_seq_open, 131 .open = qstat_seq_open,
141 .read = seq_read, 132 .read = seq_read,
142 .write = qstat_seq_write,
143 .llseek = seq_lseek, 133 .llseek = seq_lseek,
144 .release = single_release, 134 .release = single_release,
145}; 135};
@@ -161,12 +151,14 @@ static char *qperf_names[] = {
161 "Inbound queue full", 151 "Inbound queue full",
162 "Outbound calls", 152 "Outbound calls",
163 "Outbound handler", 153 "Outbound handler",
154 "Outbound queue full",
164 "Outbound fast_requeue", 155 "Outbound fast_requeue",
165 "Outbound target_full", 156 "Outbound target_full",
166 "QEBSM eqbs", 157 "QEBSM eqbs",
167 "QEBSM eqbs partial", 158 "QEBSM eqbs partial",
168 "QEBSM sqbs", 159 "QEBSM sqbs",
169 "QEBSM sqbs partial" 160 "QEBSM sqbs partial",
161 "Discarded interrupts"
170}; 162};
171 163
172static int qperf_show(struct seq_file *m, void *v) 164static int qperf_show(struct seq_file *m, void *v)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 00520f9a7a8e..570d4da10696 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
17#include <asm/atomic.h> 18#include <asm/atomic.h>
18#include <asm/debug.h> 19#include <asm/debug.h>
19#include <asm/qdio.h> 20#include <asm/qdio.h>
@@ -29,11 +30,12 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
29MODULE_DESCRIPTION("QDIO base support"); 30MODULE_DESCRIPTION("QDIO base support");
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
32static inline int do_siga_sync(struct subchannel_id schid, 33static inline int do_siga_sync(unsigned long schid,
33 unsigned int out_mask, unsigned int in_mask) 34 unsigned int out_mask, unsigned int in_mask,
35 unsigned int fc)
34{ 36{
35 register unsigned long __fc asm ("0") = 2; 37 register unsigned long __fc asm ("0") = fc;
36 register struct subchannel_id __schid asm ("1") = schid; 38 register unsigned long __schid asm ("1") = schid;
37 register unsigned long out asm ("2") = out_mask; 39 register unsigned long out asm ("2") = out_mask;
38 register unsigned long in asm ("3") = in_mask; 40 register unsigned long in asm ("3") = in_mask;
39 int cc; 41 int cc;
@@ -47,10 +49,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
47 return cc; 49 return cc;
48} 50}
49 51
50static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) 52static inline int do_siga_input(unsigned long schid, unsigned int mask,
53 unsigned int fc)
51{ 54{
52 register unsigned long __fc asm ("0") = 1; 55 register unsigned long __fc asm ("0") = fc;
53 register struct subchannel_id __schid asm ("1") = schid; 56 register unsigned long __schid asm ("1") = schid;
54 register unsigned long __mask asm ("2") = mask; 57 register unsigned long __mask asm ("2") = mask;
55 int cc; 58 int cc;
56 59
@@ -279,16 +282,20 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
279static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, 282static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
280 unsigned int input) 283 unsigned int input)
281{ 284{
285 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
286 unsigned int fc = QDIO_SIGA_SYNC;
282 int cc; 287 int cc;
283 288
284 if (!need_siga_sync(q))
285 return 0;
286
287 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 289 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
288 qperf_inc(q, siga_sync); 290 qperf_inc(q, siga_sync);
289 291
290 cc = do_siga_sync(q->irq_ptr->schid, output, input); 292 if (is_qebsm(q)) {
291 if (cc) 293 schid = q->irq_ptr->sch_token;
294 fc |= QDIO_SIGA_QEBSM_FLAG;
295 }
296
297 cc = do_siga_sync(schid, output, input, fc);
298 if (unlikely(cc))
292 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); 299 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
293 return cc; 300 return cc;
294} 301}
@@ -301,38 +308,22 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
301 return qdio_siga_sync(q, q->mask, 0); 308 return qdio_siga_sync(q, q->mask, 0);
302} 309}
303 310
304static inline int qdio_siga_sync_out(struct qdio_q *q)
305{
306 return qdio_siga_sync(q, ~0U, 0);
307}
308
309static inline int qdio_siga_sync_all(struct qdio_q *q)
310{
311 return qdio_siga_sync(q, ~0U, ~0U);
312}
313
314static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) 311static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315{ 312{
316 unsigned long schid; 313 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
317 unsigned int fc = 0; 314 unsigned int fc = QDIO_SIGA_WRITE;
318 u64 start_time = 0; 315 u64 start_time = 0;
319 int cc; 316 int cc;
320 317
321 if (q->u.out.use_enh_siga)
322 fc = 3;
323
324 if (is_qebsm(q)) { 318 if (is_qebsm(q)) {
325 schid = q->irq_ptr->sch_token; 319 schid = q->irq_ptr->sch_token;
326 fc |= 0x80; 320 fc |= QDIO_SIGA_QEBSM_FLAG;
327 } 321 }
328 else
329 schid = *((u32 *)&q->irq_ptr->schid);
330
331again: 322again:
332 cc = do_siga_output(schid, q->mask, busy_bit, fc); 323 cc = do_siga_output(schid, q->mask, busy_bit, fc);
333 324
334 /* hipersocket busy condition */ 325 /* hipersocket busy condition */
335 if (*busy_bit) { 326 if (unlikely(*busy_bit)) {
336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); 327 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
337 328
338 if (!start_time) { 329 if (!start_time) {
@@ -347,32 +338,41 @@ again:
347 338
348static inline int qdio_siga_input(struct qdio_q *q) 339static inline int qdio_siga_input(struct qdio_q *q)
349{ 340{
341 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
342 unsigned int fc = QDIO_SIGA_READ;
350 int cc; 343 int cc;
351 344
352 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 345 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
353 qperf_inc(q, siga_read); 346 qperf_inc(q, siga_read);
354 347
355 cc = do_siga_input(q->irq_ptr->schid, q->mask); 348 if (is_qebsm(q)) {
356 if (cc) 349 schid = q->irq_ptr->sch_token;
350 fc |= QDIO_SIGA_QEBSM_FLAG;
351 }
352
353 cc = do_siga_input(schid, q->mask, fc);
354 if (unlikely(cc))
357 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); 355 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
358 return cc; 356 return cc;
359} 357}
360 358
361static inline void qdio_sync_after_thinint(struct qdio_q *q) 359#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
360#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
361
362static inline void qdio_sync_queues(struct qdio_q *q)
362{ 363{
363 if (pci_out_supported(q)) { 364 /* PCI capable outbound queues will also be scanned so sync them too */
364 if (need_siga_sync_thinint(q)) 365 if (pci_out_supported(q))
365 qdio_siga_sync_all(q); 366 qdio_siga_sync_all(q);
366 else if (need_siga_sync_out_thinint(q)) 367 else
367 qdio_siga_sync_out(q);
368 } else
369 qdio_siga_sync_q(q); 368 qdio_siga_sync_q(q);
370} 369}
371 370
372int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, 371int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
373 unsigned char *state) 372 unsigned char *state)
374{ 373{
375 qdio_siga_sync_q(q); 374 if (need_siga_sync(q))
375 qdio_siga_sync_q(q);
376 return get_buf_states(q, bufnr, state, 1, 0); 376 return get_buf_states(q, bufnr, state, 1, 0);
377} 377}
378 378
@@ -407,13 +407,16 @@ static inline void account_sbals(struct qdio_q *q, int count)
407 q->q_stats.nr_sbals[pos]++; 407 q->q_stats.nr_sbals[pos]++;
408} 408}
409 409
410static void announce_buffer_error(struct qdio_q *q, int count) 410static void process_buffer_error(struct qdio_q *q, int count)
411{ 411{
412 unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
413 SLSB_P_OUTPUT_NOT_INIT;
414
412 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 415 q->qdio_error |= QDIO_ERROR_SLSB_STATE;
413 416
414 /* special handling for no target buffer empty */ 417 /* special handling for no target buffer empty */
415 if ((!q->is_input_q && 418 if ((!q->is_input_q &&
416 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { 419 (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
417 qperf_inc(q, target_full); 420 qperf_inc(q, target_full);
418 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 421 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
419 q->first_to_check); 422 q->first_to_check);
@@ -424,8 +427,14 @@ static void announce_buffer_error(struct qdio_q *q, int count)
424 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); 427 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
425 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); 428 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
426 DBF_ERROR("F14:%2x F15:%2x", 429 DBF_ERROR("F14:%2x F15:%2x",
427 q->sbal[q->first_to_check]->element[14].flags & 0xff, 430 q->sbal[q->first_to_check]->element[14].sflags,
428 q->sbal[q->first_to_check]->element[15].flags & 0xff); 431 q->sbal[q->first_to_check]->element[15].sflags);
432
433 /*
434 * Interrupts may be avoided as long as the error is present
435 * so change the buffer state immediately to avoid starvation.
436 */
437 set_buf_states(q, q->first_to_check, state, count);
429} 438}
430 439
431static inline void inbound_primed(struct qdio_q *q, int count) 440static inline void inbound_primed(struct qdio_q *q, int count)
@@ -476,7 +485,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
476static int get_inbound_buffer_frontier(struct qdio_q *q) 485static int get_inbound_buffer_frontier(struct qdio_q *q)
477{ 486{
478 int count, stop; 487 int count, stop;
479 unsigned char state; 488 unsigned char state = 0;
480 489
481 /* 490 /*
482 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 491 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -506,8 +515,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
506 account_sbals(q, count); 515 account_sbals(q, count);
507 break; 516 break;
508 case SLSB_P_INPUT_ERROR: 517 case SLSB_P_INPUT_ERROR:
509 announce_buffer_error(q, count); 518 process_buffer_error(q, count);
510 /* process the buffer, the upper layer will take care of it */
511 q->first_to_check = add_buf(q->first_to_check, count); 519 q->first_to_check = add_buf(q->first_to_check, count);
512 atomic_sub(count, &q->nr_buf_used); 520 atomic_sub(count, &q->nr_buf_used);
513 if (q->irq_ptr->perf_stat_enabled) 521 if (q->irq_ptr->perf_stat_enabled)
@@ -549,7 +557,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
549 if (!atomic_read(&q->nr_buf_used)) 557 if (!atomic_read(&q->nr_buf_used))
550 return 1; 558 return 1;
551 559
552 qdio_siga_sync_q(q); 560 if (need_siga_sync(q))
561 qdio_siga_sync_q(q);
553 get_buf_state(q, q->first_to_check, &state, 0); 562 get_buf_state(q, q->first_to_check, &state, 0);
554 563
555 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) 564 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -642,11 +651,14 @@ void qdio_inbound_processing(unsigned long data)
642static int get_outbound_buffer_frontier(struct qdio_q *q) 651static int get_outbound_buffer_frontier(struct qdio_q *q)
643{ 652{
644 int count, stop; 653 int count, stop;
645 unsigned char state; 654 unsigned char state = 0;
646 655
647 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || 656 if (need_siga_sync(q))
648 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) 657 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
649 qdio_siga_sync_q(q); 658 !pci_out_supported(q)) ||
659 (queue_type(q) == QDIO_IQDIO_QFMT &&
660 multicast_outbound(q)))
661 qdio_siga_sync_q(q);
650 662
651 /* 663 /*
652 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 664 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -673,8 +685,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
673 account_sbals(q, count); 685 account_sbals(q, count);
674 break; 686 break;
675 case SLSB_P_OUTPUT_ERROR: 687 case SLSB_P_OUTPUT_ERROR:
676 announce_buffer_error(q, count); 688 process_buffer_error(q, count);
677 /* process the buffer, the upper layer will take care of it */
678 q->first_to_check = add_buf(q->first_to_check, count); 689 q->first_to_check = add_buf(q->first_to_check, count);
679 atomic_sub(count, &q->nr_buf_used); 690 atomic_sub(count, &q->nr_buf_used);
680 if (q->irq_ptr->perf_stat_enabled) 691 if (q->irq_ptr->perf_stat_enabled)
@@ -818,7 +829,8 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
818static void __tiqdio_inbound_processing(struct qdio_q *q) 829static void __tiqdio_inbound_processing(struct qdio_q *q)
819{ 830{
820 qperf_inc(q, tasklet_inbound); 831 qperf_inc(q, tasklet_inbound);
821 qdio_sync_after_thinint(q); 832 if (need_siga_sync(q) && need_siga_sync_after_ai(q))
833 qdio_sync_queues(q);
822 834
823 /* 835 /*
824 * The interrupt could be caused by a PCI request. Check the 836 * The interrupt could be caused by a PCI request. Check the
@@ -884,19 +896,28 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
884 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 896 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
885 return; 897 return;
886 898
887 for_each_input_queue(irq_ptr, q, i) 899 for_each_input_queue(irq_ptr, q, i) {
888 tasklet_schedule(&q->tasklet); 900 if (q->u.in.queue_start_poll) {
901 /* skip if polling is enabled or already in work */
902 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
903 &q->u.in.queue_irq_state)) {
904 qperf_inc(q, int_discarded);
905 continue;
906 }
907 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
908 q->irq_ptr->int_parm);
909 } else
910 tasklet_schedule(&q->tasklet);
911 }
889 912
890 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 913 if (!pci_out_supported(q))
891 return; 914 return;
892 915
893 for_each_output_queue(irq_ptr, q, i) { 916 for_each_output_queue(irq_ptr, q, i) {
894 if (qdio_outbound_q_done(q)) 917 if (qdio_outbound_q_done(q))
895 continue; 918 continue;
896 919 if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
897 if (!siga_syncs_out_pci(q))
898 qdio_siga_sync_q(q); 920 qdio_siga_sync_q(q);
899
900 tasklet_schedule(&q->tasklet); 921 tasklet_schedule(&q->tasklet);
901 } 922 }
902} 923}
@@ -959,6 +980,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
959 return; 980 return;
960 } 981 }
961 982
983 kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
962 if (irq_ptr->perf_stat_enabled) 984 if (irq_ptr->perf_stat_enabled)
963 irq_ptr->perf_stat.qdio_int++; 985 irq_ptr->perf_stat.qdio_int++;
964 986
@@ -1262,7 +1284,6 @@ int qdio_establish(struct qdio_initialize *init_data)
1262 } 1284 }
1263 1285
1264 qdio_setup_ssqd_info(irq_ptr); 1286 qdio_setup_ssqd_info(irq_ptr);
1265 DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
1266 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); 1287 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
1267 1288
1268 /* qebsm is now setup if available, initialize buffer states */ 1289 /* qebsm is now setup if available, initialize buffer states */
@@ -1425,7 +1446,7 @@ set:
1425static int handle_outbound(struct qdio_q *q, unsigned int callflags, 1446static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1426 int bufnr, int count) 1447 int bufnr, int count)
1427{ 1448{
1428 unsigned char state; 1449 unsigned char state = 0;
1429 int used, rc = 0; 1450 int used, rc = 0;
1430 1451
1431 qperf_inc(q, outbound_call); 1452 qperf_inc(q, outbound_call);
@@ -1434,52 +1455,38 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1434 used = atomic_add_return(count, &q->nr_buf_used); 1455 used = atomic_add_return(count, &q->nr_buf_used);
1435 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); 1456 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1436 1457
1458 if (used == QDIO_MAX_BUFFERS_PER_Q)
1459 qperf_inc(q, outbound_queue_full);
1460
1437 if (callflags & QDIO_FLAG_PCI_OUT) { 1461 if (callflags & QDIO_FLAG_PCI_OUT) {
1438 q->u.out.pci_out_enabled = 1; 1462 q->u.out.pci_out_enabled = 1;
1439 qperf_inc(q, pci_request_int); 1463 qperf_inc(q, pci_request_int);
1440 } 1464 } else
1441 else
1442 q->u.out.pci_out_enabled = 0; 1465 q->u.out.pci_out_enabled = 0;
1443 1466
1444 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1467 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1445 if (multicast_outbound(q)) 1468 /* One SIGA-W per buffer required for unicast HiperSockets. */
1469 WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1470
1471 rc = qdio_kick_outbound_q(q);
1472 } else if (need_siga_sync(q)) {
1473 rc = qdio_siga_sync_q(q);
1474 } else {
1475 /* try to fast requeue buffers */
1476 get_buf_state(q, prev_buf(bufnr), &state, 0);
1477 if (state != SLSB_CU_OUTPUT_PRIMED)
1446 rc = qdio_kick_outbound_q(q); 1478 rc = qdio_kick_outbound_q(q);
1447 else 1479 else
1448 if ((q->irq_ptr->ssqd_desc.mmwc > 1) && 1480 qperf_inc(q, fast_requeue);
1449 (count > 1) &&
1450 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
1451 /* exploit enhanced SIGA */
1452 q->u.out.use_enh_siga = 1;
1453 rc = qdio_kick_outbound_q(q);
1454 } else {
1455 /*
1456 * One siga-w per buffer required for unicast
1457 * HiperSockets.
1458 */
1459 q->u.out.use_enh_siga = 0;
1460 while (count--) {
1461 rc = qdio_kick_outbound_q(q);
1462 if (rc)
1463 goto out;
1464 }
1465 }
1466 goto out;
1467 } 1481 }
1468 1482
1469 if (need_siga_sync(q)) { 1483 /* in case of SIGA errors we must process the error immediately */
1470 qdio_siga_sync_q(q); 1484 if (used >= q->u.out.scan_threshold || rc)
1471 goto out; 1485 tasklet_schedule(&q->tasklet);
1472 }
1473
1474 /* try to fast requeue buffers */
1475 get_buf_state(q, prev_buf(bufnr), &state, 0);
1476 if (state != SLSB_CU_OUTPUT_PRIMED)
1477 rc = qdio_kick_outbound_q(q);
1478 else 1486 else
1479 qperf_inc(q, fast_requeue); 1487 /* free the SBALs in case of no further traffic */
1480 1488 if (!timer_pending(&q->u.out.timer))
1481out: 1489 mod_timer(&q->u.out.timer, jiffies + HZ);
1482 tasklet_schedule(&q->tasklet);
1483 return rc; 1490 return rc;
1484} 1491}
1485 1492
@@ -1508,7 +1515,8 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1508 1515
1509 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) 1516 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1510 return -EBUSY; 1517 return -EBUSY;
1511 1518 if (!count)
1519 return 0;
1512 if (callflags & QDIO_FLAG_SYNC_INPUT) 1520 if (callflags & QDIO_FLAG_SYNC_INPUT)
1513 return handle_inbound(irq_ptr->input_qs[q_nr], 1521 return handle_inbound(irq_ptr->input_qs[q_nr],
1514 callflags, bufnr, count); 1522 callflags, bufnr, count);
@@ -1519,30 +1527,155 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1519} 1527}
1520EXPORT_SYMBOL_GPL(do_QDIO); 1528EXPORT_SYMBOL_GPL(do_QDIO);
1521 1529
1530/**
1531 * qdio_start_irq - process input buffers
1532 * @cdev: associated ccw_device for the qdio subchannel
1533 * @nr: input queue number
1534 *
1535 * Return codes
1536 * 0 - success
1537 * 1 - irqs not started since new data is available
1538 */
1539int qdio_start_irq(struct ccw_device *cdev, int nr)
1540{
1541 struct qdio_q *q;
1542 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1543
1544 if (!irq_ptr)
1545 return -ENODEV;
1546 q = irq_ptr->input_qs[nr];
1547
1548 WARN_ON(queue_irqs_enabled(q));
1549
1550 if (!shared_ind(q->irq_ptr->dsci))
1551 xchg(q->irq_ptr->dsci, 0);
1552
1553 qdio_stop_polling(q);
1554 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1555
1556 /*
1557 * We need to check again to not lose initiative after
1558 * resetting the ACK state.
1559 */
1560 if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
1561 goto rescan;
1562 if (!qdio_inbound_q_done(q))
1563 goto rescan;
1564 return 0;
1565
1566rescan:
1567 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1568 &q->u.in.queue_irq_state))
1569 return 0;
1570 else
1571 return 1;
1572
1573}
1574EXPORT_SYMBOL(qdio_start_irq);
1575
1576/**
1577 * qdio_get_next_buffers - process input buffers
1578 * @cdev: associated ccw_device for the qdio subchannel
1579 * @nr: input queue number
1580 * @bufnr: first filled buffer number
1581 * @error: buffers are in error state
1582 *
1583 * Return codes
1584 * < 0 - error
1585 * = 0 - no new buffers found
1586 * > 0 - number of processed buffers
1587 */
1588int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1589 int *error)
1590{
1591 struct qdio_q *q;
1592 int start, end;
1593 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1594
1595 if (!irq_ptr)
1596 return -ENODEV;
1597 q = irq_ptr->input_qs[nr];
1598 WARN_ON(queue_irqs_enabled(q));
1599
1600 /*
1601 * Cannot rely on automatic sync after interrupt since queues may
1602 * also be examined without interrupt.
1603 */
1604 if (need_siga_sync(q))
1605 qdio_sync_queues(q);
1606
1607 /* check the PCI capable outbound queues. */
1608 qdio_check_outbound_after_thinint(q);
1609
1610 if (!qdio_inbound_q_moved(q))
1611 return 0;
1612
1613 /* Note: upper-layer MUST stop processing immediately here ... */
1614 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1615 return -EIO;
1616
1617 start = q->first_to_kick;
1618 end = q->first_to_check;
1619 *bufnr = start;
1620 *error = q->qdio_error;
1621
1622 /* for the next time */
1623 q->first_to_kick = end;
1624 q->qdio_error = 0;
1625 return sub_buf(end, start);
1626}
1627EXPORT_SYMBOL(qdio_get_next_buffers);
1628
1629/**
1630 * qdio_stop_irq - disable interrupt processing for the device
1631 * @cdev: associated ccw_device for the qdio subchannel
1632 * @nr: input queue number
1633 *
1634 * Return codes
1635 * 0 - interrupts were already disabled
1636 * 1 - interrupts successfully disabled
1637 */
1638int qdio_stop_irq(struct ccw_device *cdev, int nr)
1639{
1640 struct qdio_q *q;
1641 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1642
1643 if (!irq_ptr)
1644 return -ENODEV;
1645 q = irq_ptr->input_qs[nr];
1646
1647 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1648 &q->u.in.queue_irq_state))
1649 return 0;
1650 else
1651 return 1;
1652}
1653EXPORT_SYMBOL(qdio_stop_irq);
1654
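Together the three exports support a NAPI-style receive path: disable QDIO interrupts, drain buffers, then re-arm. A hedged sketch of the resulting poll loop (input queue 0; consume_buffers() is an assumed driver helper):

    int bufnr, error, n;

poll_again:
    while ((n = qdio_get_next_buffers(cdev, 0, &bufnr, &error)) > 0)
        consume_buffers(bufnr, n, error);   /* assumed helper */

    /* re-enable interrupts; rc 1 means data raced in, keep polling */
    if (n == 0 && qdio_start_irq(cdev, 0) == 1)
        goto poll_again;
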
1522static int __init init_QDIO(void) 1655static int __init init_QDIO(void)
1523{ 1656{
1524 int rc; 1657 int rc;
1525 1658
1526 rc = qdio_setup_init(); 1659 rc = qdio_debug_init();
1527 if (rc) 1660 if (rc)
1528 return rc; 1661 return rc;
1662 rc = qdio_setup_init();
1663 if (rc)
1664 goto out_debug;
1529 rc = tiqdio_allocate_memory(); 1665 rc = tiqdio_allocate_memory();
1530 if (rc) 1666 if (rc)
1531 goto out_cache; 1667 goto out_cache;
1532 rc = qdio_debug_init();
1533 if (rc)
1534 goto out_ti;
1535 rc = tiqdio_register_thinints(); 1668 rc = tiqdio_register_thinints();
1536 if (rc) 1669 if (rc)
1537 goto out_debug; 1670 goto out_ti;
1538 return 0; 1671 return 0;
1539 1672
1540out_debug:
1541 qdio_debug_exit();
1542out_ti: 1673out_ti:
1543 tiqdio_free_memory(); 1674 tiqdio_free_memory();
1544out_cache: 1675out_cache:
1545 qdio_setup_exit(); 1676 qdio_setup_exit();
1677out_debug:
1678 qdio_debug_exit();
1546 return rc; 1679 return rc;
1547} 1680}
1548 1681
@@ -1550,8 +1683,8 @@ static void __exit exit_QDIO(void)
1550{ 1683{
1551 tiqdio_unregister_thinints(); 1684 tiqdio_unregister_thinints();
1552 tiqdio_free_memory(); 1685 tiqdio_free_memory();
1553 qdio_debug_exit();
1554 qdio_setup_exit(); 1686 qdio_setup_exit();
1687 qdio_debug_exit();
1555} 1688}
1556 1689
1557module_init(init_QDIO); 1690module_init(init_QDIO);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 34c7e4046df4..89107d0938c4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
161 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); 161 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
162 162
163 q->is_input_q = 1; 163 q->is_input_q = 1;
164 q->u.in.queue_start_poll = qdio_init->queue_start_poll;
164 setup_storage_lists(q, irq_ptr, input_sbal_array, i); 165 setup_storage_lists(q, irq_ptr, input_sbal_array, i);
165 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; 166 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
166 167
@@ -177,6 +178,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
177 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); 178 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
178 179
179 q->is_input_q = 0; 180 q->is_input_q = 0;
181 q->u.out.scan_threshold = qdio_init->scan_threshold;
180 setup_storage_lists(q, irq_ptr, output_sbal_array, i); 182 setup_storage_lists(q, irq_ptr, output_sbal_array, i);
181 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; 183 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
182 184
@@ -195,14 +197,10 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
195 irq_ptr->siga_flag.output = 1; 197 irq_ptr->siga_flag.output = 1;
196 if (qdioac & AC1_SIGA_SYNC_NEEDED) 198 if (qdioac & AC1_SIGA_SYNC_NEEDED)
197 irq_ptr->siga_flag.sync = 1; 199 irq_ptr->siga_flag.sync = 1;
198 if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) 200 if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
199 irq_ptr->siga_flag.no_sync_ti = 1; 201 irq_ptr->siga_flag.sync_after_ai = 1;
200 if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI) 202 if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
201 irq_ptr->siga_flag.no_sync_out_pci = 1; 203 irq_ptr->siga_flag.sync_out_after_pci = 1;
202
203 if (irq_ptr->siga_flag.no_sync_out_pci &&
204 irq_ptr->siga_flag.no_sync_ti)
205 irq_ptr->siga_flag.no_sync_out_ti = 1;
206} 204}
207 205
208static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, 206static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
@@ -450,7 +448,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
450 char s[80]; 448 char s[80];
451 449
452 snprintf(s, 80, "qdio: %s %s on SC %x using " 450 snprintf(s, 80, "qdio: %s %s on SC %x using "
453 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", 451 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
454 dev_name(&cdev->dev), 452 dev_name(&cdev->dev),
455 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : 453 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
456 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), 454 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
@@ -462,9 +460,8 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
462 (irq_ptr->siga_flag.input) ? "R" : " ", 460 (irq_ptr->siga_flag.input) ? "R" : " ",
463 (irq_ptr->siga_flag.output) ? "W" : " ", 461 (irq_ptr->siga_flag.output) ? "W" : " ",
464 (irq_ptr->siga_flag.sync) ? "S" : " ", 462 (irq_ptr->siga_flag.sync) ? "S" : " ",
465 (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", 463 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
466 (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", 464 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
467 (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
468 printk(KERN_INFO "%s", s); 465 printk(KERN_INFO "%s", s);
469} 466}
470 467
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8daf1b99f153..5c4e741d8221 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/kernel_stat.h>
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12#include <asm/debug.h> 13#include <asm/debug.h>
13#include <asm/qdio.h> 14#include <asm/qdio.h>
@@ -25,35 +26,17 @@
25 */ 26 */
26#define TIQDIO_NR_NONSHARED_IND 63 27#define TIQDIO_NR_NONSHARED_IND 63
27#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) 28#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
28#define TIQDIO_SHARED_IND 63
29 29
30/* list of thin interrupt input queues */ 30/* list of thin interrupt input queues */
31static LIST_HEAD(tiq_list); 31static LIST_HEAD(tiq_list);
32DEFINE_MUTEX(tiq_list_lock); 32DEFINE_MUTEX(tiq_list_lock);
33 33
34/* adapter local summary indicator */ 34/* adapter local summary indicator */
35static unsigned char *tiqdio_alsi; 35static u8 *tiqdio_alsi;
36 36
37/* device state change indicators */ 37struct indicator_t *q_indicators;
38struct indicator_t {
39 u32 ind; /* u32 because of compare-and-swap performance */
40 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
41};
42static struct indicator_t *q_indicators;
43 38
44static int css_qdio_omit_svs; 39static u64 last_ai_time;
45
46static inline unsigned long do_clear_global_summary(void)
47{
48 register unsigned long __fn asm("1") = 3;
49 register unsigned long __tmp asm("2");
50 register unsigned long __time asm("3");
51
52 asm volatile(
53 " .insn rre,0xb2650000,2,0"
54 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
55 return __time;
56}
57 40
58/* returns addr for the device state change indicator */ 41/* returns addr for the device state change indicator */
59static u32 *get_indicator(void) 42static u32 *get_indicator(void)
@@ -87,10 +70,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
87 struct qdio_q *q; 70 struct qdio_q *q;
88 int i; 71 int i;
89 72
90 /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
91 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
92 css_qdio_omit_svs = 1;
93
94 mutex_lock(&tiq_list_lock); 73 mutex_lock(&tiq_list_lock);
95 for_each_input_queue(irq_ptr, q, i) 74 for_each_input_queue(irq_ptr, q, i)
96 list_add_rcu(&q->entry, &tiq_list); 75 list_add_rcu(&q->entry, &tiq_list);
@@ -116,65 +95,68 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
116 } 95 }
117} 96}
118 97
119static inline int shared_ind(struct qdio_irq *irq_ptr) 98static inline u32 shared_ind_set(void)
120{ 99{
121 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; 100 return q_indicators[TIQDIO_SHARED_IND].ind;
122} 101}
123 102
124/** 103/**
125 * tiqdio_thinint_handler - thin interrupt handler for qdio 104 * tiqdio_thinint_handler - thin interrupt handler for qdio
126 * @ind: pointer to adapter local summary indicator 105 * @alsi: pointer to adapter local summary indicator
127 * @drv_data: NULL 106 * @data: NULL
128 */ 107 */
129static void tiqdio_thinint_handler(void *ind, void *drv_data) 108static void tiqdio_thinint_handler(void *alsi, void *data)
130{ 109{
110 u32 si_used = shared_ind_set();
131 struct qdio_q *q; 111 struct qdio_q *q;
132 112
133 /* 113 last_ai_time = S390_lowcore.int_clock;
134 * SVS only when needed: issue SVS to benefit from iqdio interrupt 114 kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
135 * avoidance (SVS clears adapter interrupt suppression overwrite)
136 */
137 if (!css_qdio_omit_svs)
138 do_clear_global_summary();
139
140 /*
141 * reset local summary indicator (tiqdio_alsi) to stop adapter
142 * interrupts for now
143 */
144 xchg((u8 *)ind, 0);
145 115
146 /* protect tiq_list entries, only changed in activate or shutdown */ 116 /* protect tiq_list entries, only changed in activate or shutdown */
147 rcu_read_lock(); 117 rcu_read_lock();
148 118
149 /* check for work on all inbound thinint queues */ 119 /* check for work on all inbound thinint queues */
150 list_for_each_entry_rcu(q, &tiq_list, entry) 120 list_for_each_entry_rcu(q, &tiq_list, entry) {
121
151 /* only process queues from changed sets */ 122 /* only process queues from changed sets */
152 if (*q->irq_ptr->dsci) { 123 if (unlikely(shared_ind(q->irq_ptr->dsci))) {
153 qperf_inc(q, adapter_int); 124 if (!si_used)
125 continue;
126 } else if (!*q->irq_ptr->dsci)
127 continue;
154 128
129 if (q->u.in.queue_start_poll) {
130 /* skip if polling is enabled or already in work */
131 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
132 &q->u.in.queue_irq_state)) {
133 qperf_inc(q, int_discarded);
134 continue;
135 }
136
137 /* avoid dsci clear here, done after processing */
138 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
139 q->irq_ptr->int_parm);
140 } else {
155 /* only clear it if the indicator is non-shared */ 141 /* only clear it if the indicator is non-shared */
156 if (!shared_ind(q->irq_ptr)) 142 if (!shared_ind(q->irq_ptr->dsci))
157 xchg(q->irq_ptr->dsci, 0); 143 xchg(q->irq_ptr->dsci, 0);
158 /* 144 /*
159 * don't call inbound processing directly since 145 * Call inbound processing but not directly
160 * that could starve other thinint queues 146 * since that could starve other thinint queues.
161 */ 147 */
162 tasklet_schedule(&q->tasklet); 148 tasklet_schedule(&q->tasklet);
163 } 149 }
164 150 qperf_inc(q, adapter_int);
151 }
165 rcu_read_unlock(); 152 rcu_read_unlock();
166 153
167 /* 154 /*
168 * if we used the shared indicator clear it now after all queues 155 * If the shared indicator was used clear it now after all queues
169 * were processed 156 * were processed.
170 */ 157 */
171 if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { 158 if (si_used && shared_ind_set())
172 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); 159 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
173
174 /* prevent racing */
175 if (*tiqdio_alsi)
176 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
177 }
178} 160}
179 161
180static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) 162static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -259,12 +241,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
259{ 241{
260 if (!is_thinint_irq(irq_ptr)) 242 if (!is_thinint_irq(irq_ptr))
261 return 0; 243 return 0;
262
263 /* Check for aif time delay disablement. If installed,
264 * omit SVS even under LPAR
265 */
266 if (css_general_characteristics.aif_tdd)
267 css_qdio_omit_svs = 1;
268 return set_subchannel_ind(irq_ptr, 0); 244 return set_subchannel_ind(irq_ptr, 0);
269} 245}
270 246
@@ -282,8 +258,8 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
282 return; 258 return;
283 259
284 /* reset adapter interrupt indicators */ 260 /* reset adapter interrupt indicators */
285 put_indicator(irq_ptr->dsci);
286 set_subchannel_ind(irq_ptr, 1); 261 set_subchannel_ind(irq_ptr, 1);
262 put_indicator(irq_ptr->dsci);
287} 263}
288 264
289void __exit tiqdio_unregister_thinints(void) 265void __exit tiqdio_unregister_thinints(void)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 91c6028d7b74..16e4a25596e7 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -27,6 +27,7 @@
27#define KMSG_COMPONENT "ap" 27#define KMSG_COMPONENT "ap"
28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 29
30#include <linux/kernel_stat.h>
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
@@ -154,14 +155,7 @@ static inline int ap_instructions_available(void)
154 */ 155 */
155static int ap_interrupts_available(void) 156static int ap_interrupts_available(void)
156{ 157{
157 unsigned long long facility_bits[2]; 158 return test_facility(2) && test_facility(65);
158
159 if (stfle(facility_bits, 2) <= 1)
160 return 0;
161 if (!(facility_bits[0] & (1ULL << 61)) ||
162 !(facility_bits[1] & (1ULL << 62)))
163 return 0;
164 return 1;
165} 159}
166 160
167/** 161/**
@@ -228,6 +222,69 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
228} 222}
229#endif 223#endif
230 224
225static inline struct ap_queue_status __ap_4096_commands_available(ap_qid_t qid,
226 int *support)
227{
228 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
229 register struct ap_queue_status reg1 asm ("1");
230 register unsigned long reg2 asm ("2") = 0UL;
231
232 asm volatile(
233 ".long 0xb2af0000\n"
234 "0: la %1,0\n"
235 "1:\n"
236 EX_TABLE(0b, 1b)
237 : "+d" (reg0), "=d" (reg1), "=d" (reg2)
238 :
239 : "cc");
240
241 if (reg2 & 0x6000000000000000ULL)
242 *support = 1;
243 else
244 *support = 0;
245
246 return reg1;
247}
248
249/**
 250 * ap_4096_commands_available(): Check for availability of 4096 bit RSA
251 * support.
252 * @qid: The AP queue number
253 *
 254 * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
255 */
256int ap_4096_commands_available(ap_qid_t qid)
257{
258 struct ap_queue_status status;
259 int i, support = 0;
260 status = __ap_4096_commands_available(qid, &support);
261
262 for (i = 0; i < AP_MAX_RESET; i++) {
263 switch (status.response_code) {
264 case AP_RESPONSE_NORMAL:
265 return support;
266 case AP_RESPONSE_RESET_IN_PROGRESS:
267 case AP_RESPONSE_BUSY:
268 break;
269 case AP_RESPONSE_Q_NOT_AVAIL:
270 case AP_RESPONSE_DECONFIGURED:
271 case AP_RESPONSE_CHECKSTOPPED:
272 case AP_RESPONSE_INVALID_ADDRESS:
273 return 0;
274 case AP_RESPONSE_OTHERWISE_CHANGED:
275 break;
276 default:
277 break;
278 }
279 if (i < AP_MAX_RESET - 1) {
280 udelay(5);
281 status = __ap_4096_commands_available(qid, &support);
282 }
283 }
284 return support;
285}
286EXPORT_SYMBOL(ap_4096_commands_available);
287
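A brief usage sketch for the new export, as a card probe routine might use it (zdev and ap_dev fields assumed from the surrounding zcrypt code):

    /* raise the modulus limit only if the queue handles 4096-bit RSA */
    if (ap_4096_commands_available(ap_dev->qid))
        zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;    /* 512 bytes */
    else
        zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;    /* 256 bytes */
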
231/** 288/**
232 * ap_queue_enable_interruption(): Enable interruption on an AP. 289 * ap_queue_enable_interruption(): Enable interruption on an AP.
233 * @qid: The AP queue number 290 * @qid: The AP queue number
@@ -1049,6 +1106,7 @@ out:
1049 1106
1050static void ap_interrupt_handler(void *unused1, void *unused2) 1107static void ap_interrupt_handler(void *unused1, void *unused2)
1051{ 1108{
1109 kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
1052 tasklet_schedule(&ap_tasklet); 1110 tasklet_schedule(&ap_tasklet);
1053} 1111}
1054 1112
@@ -1125,8 +1183,12 @@ static void ap_scan_bus(struct work_struct *unused)
1125 INIT_LIST_HEAD(&ap_dev->list); 1183 INIT_LIST_HEAD(&ap_dev->list);
1126 setup_timer(&ap_dev->timeout, ap_request_timeout, 1184 setup_timer(&ap_dev->timeout, ap_request_timeout,
1127 (unsigned long) ap_dev); 1185 (unsigned long) ap_dev);
1128 if (device_type == 0) 1186 if (device_type == 0) {
1129 ap_probe_device_type(ap_dev); 1187 if (ap_probe_device_type(ap_dev)) {
1188 kfree(ap_dev);
1189 continue;
1190 }
1191 }
1130 else 1192 else
1131 ap_dev->device_type = device_type; 1193 ap_dev->device_type = device_type;
1132 1194
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4785d07cd447..08b9738285b4 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -196,4 +196,6 @@ void ap_flush_queue(struct ap_device *ap_dev);
196int ap_module_init(void); 196int ap_module_init(void);
197void ap_module_exit(void); 197void ap_module_exit(void);
198 198
199int ap_4096_commands_available(ap_qid_t qid);
200
199#endif /* _AP_BUS_H_ */ 201#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 41e0aaefafd5..8e65447f76b7 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -35,7 +35,6 @@
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/compat.h> 37#include <linux/compat.h>
38#include <linux/smp_lock.h>
39#include <linux/slab.h> 38#include <linux/slab.h>
40#include <asm/atomic.h> 39#include <asm/atomic.h>
41#include <asm/uaccess.h> 40#include <asm/uaccess.h>
@@ -397,8 +396,15 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
397 if (copied == 0) { 396 if (copied == 0) {
398 unsigned int len; 397 unsigned int len;
399 spin_unlock_bh(&zcrypt_device_lock); 398 spin_unlock_bh(&zcrypt_device_lock);
400 /* len is max 256 / 2 - 120 = 8 */ 399 /* len is max 256 / 2 - 120 = 8
 401 len = crt->inputdatalength / 2 - 120; 400 * For bigger devices just assume len of leading
401 * 0s is 8 as stated in the requirements for
402 * ica_rsa_modexpo_crt struct in zcrypt.h.
403 */
404 if (crt->inputdatalength <= 256)
405 len = crt->inputdatalength / 2 - 120;
406 else
407 len = 8;
402 if (len > sizeof(z1)) 408 if (len > sizeof(z1))
403 return -EFAULT; 409 return -EFAULT;
404 z1 = z2 = z3 = 0; 410 z1 = z2 = z3 = 0;
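
The bound in the comment works out as follows; a condensed restatement of the two branches (the fixed 8-byte figure for larger requests comes from the ica_rsa_modexpo_crt contract cited above):

    /* 2048-bit CRT request: inputdatalength = 256, so
     * len = 256 / 2 - 120 = 8; larger (4096-bit capable) requests
     * fix the leading-zero length at 8 by contract.
     */
    len = (crt->inputdatalength <= 256) ?
        crt->inputdatalength / 2 - 120 : 8;
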
@@ -406,6 +412,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
406 copy_from_user(&z2, crt->bp_key, len) || 412 copy_from_user(&z2, crt->bp_key, len) ||
407 copy_from_user(&z3, crt->u_mult_inv, len)) 413 copy_from_user(&z3, crt->u_mult_inv, len))
408 return -EFAULT; 414 return -EFAULT;
415 z1 = z2 = z3 = 0;
409 copied = 1; 416 copied = 1;
410 /* 417 /*
411 * We have to restart device lookup - 418 * We have to restart device lookup -
@@ -897,7 +904,8 @@ static const struct file_operations zcrypt_fops = {
897 .compat_ioctl = zcrypt_compat_ioctl, 904 .compat_ioctl = zcrypt_compat_ioctl,
898#endif 905#endif
899 .open = zcrypt_open, 906 .open = zcrypt_open,
900 .release = zcrypt_release 907 .release = zcrypt_release,
908 .llseek = no_llseek,
901}; 909};
902 910
903/* 911/*
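
The new length rule in zcrypt_rsa_crt() is easy to verify by hand: a 2048-bit key has inputdatalength 256, each CRT half field spans 256/2 = 128 bytes of which 120 carry key material, so at most 8 leading bytes must be zero; larger keys are simply assumed to follow the same 8-byte convention from zcrypt.h. A hedged, self-contained restatement:

	/* hedged sketch of the leading-zero length computed above */
	static unsigned int leading_zero_len(unsigned int inputdatalength)
	{
		if (inputdatalength <= 256)	/* keys up to 2048 bits */
			return inputdatalength / 2 - 120;	/* at most 8 */
		return 8;	/* bigger keys: 8 by convention (zcrypt.h) */
	}
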
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 8e7ffbf2466c..9688f3985b07 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -76,7 +76,7 @@ struct ica_z90_status {
76 76
77/** 77/**
78 * Large random numbers are pulled in 4096 byte chunks from the crypto cards 78 * Large random numbers are pulled in 4096 byte chunks from the crypto cards
79 * and stored in a page. Be carefull when increasing this buffer due to size 79 * and stored in a page. Be careful when increasing this buffer due to size
80 * limitations for AP requests. 80 * limitations for AP requests.
81 */ 81 */
82#define ZCRYPT_RNG_BUFFER_SIZE 4096 82#define ZCRYPT_RNG_BUFFER_SIZE 4096
@@ -109,6 +109,7 @@ struct zcrypt_device {
109 int request_count; /* # current requests. */ 109 int request_count; /* # current requests. */
110 110
111 struct ap_message reply; /* Per-device reply structure. */ 111 struct ap_message reply; /* Per-device reply structure. */
112 int max_exp_bit_length;
112}; 113};
113 114
114struct zcrypt_device *zcrypt_device_alloc(size_t); 115struct zcrypt_device *zcrypt_device_alloc(size_t);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 9c409efa1ecf..2176d00b395e 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -41,7 +41,7 @@
41#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */ 41#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
42#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */ 42#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
43#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE 43#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
44#define CEX3A_MAX_MOD_SIZE CEX2A_MAX_MOD_SIZE 44#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
45 45
46#define CEX2A_SPEED_RATING 970 46#define CEX2A_SPEED_RATING 970
47#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */ 47#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */
@@ -49,8 +49,10 @@
49#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ 49#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
50#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ 50#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
51 51
52#define CEX3A_MAX_MESSAGE_SIZE CEX2A_MAX_MESSAGE_SIZE 52#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 byte modulus
53#define CEX3A_MAX_RESPONSE_SIZE CEX2A_MAX_RESPONSE_SIZE 53 * (max outputdatalength) +
54 * type80_hdr*/
55#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg)
54 56
55#define CEX2A_CLEANUP_TIME (15*HZ) 57#define CEX2A_CLEANUP_TIME (15*HZ)
56#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 58#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
@@ -110,7 +112,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
110 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len; 112 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
111 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len; 113 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
112 inp = meb1->message + sizeof(meb1->message) - mod_len; 114 inp = meb1->message + sizeof(meb1->message) - mod_len;
113 } else { 115 } else if (mod_len <= 256) {
114 struct type50_meb2_msg *meb2 = ap_msg->message; 116 struct type50_meb2_msg *meb2 = ap_msg->message;
115 memset(meb2, 0, sizeof(*meb2)); 117 memset(meb2, 0, sizeof(*meb2));
116 ap_msg->length = sizeof(*meb2); 118 ap_msg->length = sizeof(*meb2);
@@ -120,6 +122,17 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
120 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len; 122 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
121 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len; 123 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
122 inp = meb2->message + sizeof(meb2->message) - mod_len; 124 inp = meb2->message + sizeof(meb2->message) - mod_len;
125 } else {
126 /* mod_len > 256 bytes: up to 4096 bit RSA key */
127 struct type50_meb3_msg *meb3 = ap_msg->message;
128 memset(meb3, 0, sizeof(*meb3));
129 ap_msg->length = sizeof(*meb3);
130 meb3->header.msg_type_code = TYPE50_TYPE_CODE;
131 meb3->header.msg_len = sizeof(*meb3);
132 meb3->keyblock_type = TYPE50_MEB3_FMT;
133 mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
134 exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
135 inp = meb3->message + sizeof(meb3->message) - mod_len;
123 } 136 }
124 137
125 if (copy_from_user(mod, mex->n_modulus, mod_len) || 138 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
@@ -142,7 +155,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
142 struct ap_message *ap_msg, 155 struct ap_message *ap_msg,
143 struct ica_rsa_modexpo_crt *crt) 156 struct ica_rsa_modexpo_crt *crt)
144{ 157{
145 int mod_len, short_len, long_len, long_offset; 158 int mod_len, short_len, long_len, long_offset, limit;
146 unsigned char *p, *q, *dp, *dq, *u, *inp; 159 unsigned char *p, *q, *dp, *dq, *u, *inp;
147 160
148 mod_len = crt->inputdatalength; 161 mod_len = crt->inputdatalength;
@@ -152,14 +165,20 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
152 /* 165 /*
153 * CEX2A cannot handle p, dp, or U > 128 bytes. 166 * CEX2A cannot handle p, dp, or U > 128 bytes.
154 * If we have one of these, we need to do extra checking. 167 * If we have one of these, we need to do extra checking.
168 * For CEX3A the limit is 256 bytes.
155 */ 169 */
156 if (long_len > 128) { 170 if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
171 limit = 256;
172 else
173 limit = 128;
174
175 if (long_len > limit) {
157 /* 176 /*
158 * zcrypt_rsa_crt already checked for the leading 177 * zcrypt_rsa_crt already checked for the leading
159 * zeroes of np_prime, bp_key and u_mult_inc. 178 * zeroes of np_prime, bp_key and u_mult_inc.
160 */ 179 */
161 long_offset = long_len - 128; 180 long_offset = long_len - limit;
162 long_len = 128; 181 long_len = limit;
163 } else 182 } else
164 long_offset = 0; 183 long_offset = 0;
165 184
@@ -180,7 +199,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
180 dq = crb1->dq + sizeof(crb1->dq) - short_len; 199 dq = crb1->dq + sizeof(crb1->dq) - short_len;
181 u = crb1->u + sizeof(crb1->u) - long_len; 200 u = crb1->u + sizeof(crb1->u) - long_len;
182 inp = crb1->message + sizeof(crb1->message) - mod_len; 201 inp = crb1->message + sizeof(crb1->message) - mod_len;
183 } else { 202 } else if (long_len <= 128) {
184 struct type50_crb2_msg *crb2 = ap_msg->message; 203 struct type50_crb2_msg *crb2 = ap_msg->message;
185 memset(crb2, 0, sizeof(*crb2)); 204 memset(crb2, 0, sizeof(*crb2));
186 ap_msg->length = sizeof(*crb2); 205 ap_msg->length = sizeof(*crb2);
@@ -193,6 +212,20 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
193 dq = crb2->dq + sizeof(crb2->dq) - short_len; 212 dq = crb2->dq + sizeof(crb2->dq) - short_len;
194 u = crb2->u + sizeof(crb2->u) - long_len; 213 u = crb2->u + sizeof(crb2->u) - long_len;
195 inp = crb2->message + sizeof(crb2->message) - mod_len; 214 inp = crb2->message + sizeof(crb2->message) - mod_len;
215 } else {
216 /* long_len > 128: large modulus, needs CRB3 (CEX3A only) */
217 struct type50_crb3_msg *crb3 = ap_msg->message;
218 memset(crb3, 0, sizeof(*crb3));
219 ap_msg->length = sizeof(*crb3);
220 crb3->header.msg_type_code = TYPE50_TYPE_CODE;
221 crb3->header.msg_len = sizeof(*crb3);
222 crb3->keyblock_type = TYPE50_CRB3_FMT;
223 p = crb3->p + sizeof(crb3->p) - long_len;
224 q = crb3->q + sizeof(crb3->q) - short_len;
225 dp = crb3->dp + sizeof(crb3->dp) - long_len;
226 dq = crb3->dq + sizeof(crb3->dq) - short_len;
227 u = crb3->u + sizeof(crb3->u) - long_len;
228 inp = crb3->message + sizeof(crb3->message) - mod_len;
196 } 229 }
197 230
198 if (copy_from_user(p, crt->np_prime + long_offset, long_len) || 231 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
@@ -203,7 +236,6 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
203 copy_from_user(inp, crt->inputdata, mod_len)) 236 copy_from_user(inp, crt->inputdata, mod_len))
204 return -EFAULT; 237 return -EFAULT;
205 238
206
207 return 0; 239 return 0;
208} 240}
209 241
@@ -230,7 +262,10 @@ static int convert_type80(struct zcrypt_device *zdev,
230 zdev->online = 0; 262 zdev->online = 0;
231 return -EAGAIN; /* repeat the request on a different device. */ 263 return -EAGAIN; /* repeat the request on a different device. */
232 } 264 }
233 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); 265 if (zdev->user_space_type == ZCRYPT_CEX2A)
266 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
267 else
268 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
234 data = reply->message + t80h->len - outputdatalength; 269 data = reply->message + t80h->len - outputdatalength;
235 if (copy_to_user(outputdata, data, outputdatalength)) 270 if (copy_to_user(outputdata, data, outputdatalength))
236 return -EFAULT; 271 return -EFAULT;
@@ -282,7 +317,10 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
282 } 317 }
283 t80h = reply->message; 318 t80h = reply->message;
284 if (t80h->type == TYPE80_RSP_CODE) { 319 if (t80h->type == TYPE80_RSP_CODE) {
285 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); 320 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
321 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
322 else
323 length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
286 memcpy(msg->message, reply->message, length); 324 memcpy(msg->message, reply->message, length);
287 } else 325 } else
288 memcpy(msg->message, reply->message, sizeof error_reply); 326 memcpy(msg->message, reply->message, sizeof error_reply);
@@ -307,7 +345,10 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
307 int rc; 345 int rc;
308 346
309 ap_init_message(&ap_msg); 347 ap_init_message(&ap_msg);
310 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 348 if (zdev->user_space_type == ZCRYPT_CEX2A)
349 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
350 else
351 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
311 if (!ap_msg.message) 352 if (!ap_msg.message)
312 return -ENOMEM; 353 return -ENOMEM;
313 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 354 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -345,7 +386,10 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
345 int rc; 386 int rc;
346 387
347 ap_init_message(&ap_msg); 388 ap_init_message(&ap_msg);
348 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 389 if (zdev->user_space_type == ZCRYPT_CEX2A)
390 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
391 else
392 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
349 if (!ap_msg.message) 393 if (!ap_msg.message)
350 return -ENOMEM; 394 return -ENOMEM;
351 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 395 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -397,6 +441,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
397 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; 441 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
398 zdev->short_crt = 1; 442 zdev->short_crt = 1;
399 zdev->speed_rating = CEX2A_SPEED_RATING; 443 zdev->speed_rating = CEX2A_SPEED_RATING;
444 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
400 break; 445 break;
401 case AP_DEVICE_TYPE_CEX3A: 446 case AP_DEVICE_TYPE_CEX3A:
402 zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); 447 zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
@@ -404,8 +449,13 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
404 return -ENOMEM; 449 return -ENOMEM;
405 zdev->user_space_type = ZCRYPT_CEX3A; 450 zdev->user_space_type = ZCRYPT_CEX3A;
406 zdev->type_string = "CEX3A"; 451 zdev->type_string = "CEX3A";
407 zdev->min_mod_size = CEX3A_MIN_MOD_SIZE; 452 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
408 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; 453 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
454 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
455 if (ap_4096_commands_available(ap_dev->qid)) {
456 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
457 zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
458 }
409 zdev->short_crt = 1; 459 zdev->short_crt = 1;
410 zdev->speed_rating = CEX3A_SPEED_RATING; 460 zdev->speed_rating = CEX3A_SPEED_RATING;
411 break; 461 break;
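
The converted ICAMEX/ICACRT builders now pick one of three TYPE50 keyblock layouts by modulus length. A hedged distillation of the mod-expo selection; the 128-byte bound for meb1 is an assumption inferred from its field sizes, the other two thresholds are visible in the hunks above:

	/* hedged sketch: TYPE50 mod-expo keyblock selection */
	static unsigned short mex_keyblock_for(int mod_len)
	{
		if (mod_len <= 128)		/* up to 1024-bit modulus */
			return TYPE50_MEB1_FMT;
		if (mod_len <= 256)		/* up to 2048-bit modulus */
			return TYPE50_MEB2_FMT;
		return TYPE50_MEB3_FMT;		/* up to 4096 bits, CEX3A only */
	}
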
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 8f69d1dacab8..0350665810cf 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -51,8 +51,10 @@ struct type50_hdr {
51 51
52#define TYPE50_MEB1_FMT 0x0001 52#define TYPE50_MEB1_FMT 0x0001
53#define TYPE50_MEB2_FMT 0x0002 53#define TYPE50_MEB2_FMT 0x0002
54#define TYPE50_MEB3_FMT 0x0003
54#define TYPE50_CRB1_FMT 0x0011 55#define TYPE50_CRB1_FMT 0x0011
55#define TYPE50_CRB2_FMT 0x0012 56#define TYPE50_CRB2_FMT 0x0012
57#define TYPE50_CRB3_FMT 0x0013
56 58
57/* Mod-Exp, with a small modulus */ 59/* Mod-Exp, with a small modulus */
58struct type50_meb1_msg { 60struct type50_meb1_msg {
@@ -74,6 +76,16 @@ struct type50_meb2_msg {
74 unsigned char message[256]; 76 unsigned char message[256];
75} __attribute__((packed)); 77} __attribute__((packed));
76 78
79/* Mod-Exp, with a larger modulus */
80struct type50_meb3_msg {
81 struct type50_hdr header;
82 unsigned short keyblock_type; /* 0x0003 */
83 unsigned char reserved[6];
84 unsigned char exponent[512];
85 unsigned char modulus[512];
86 unsigned char message[512];
87} __attribute__((packed));
88
77/* CRT, with a small modulus */ 89/* CRT, with a small modulus */
78struct type50_crb1_msg { 90struct type50_crb1_msg {
79 struct type50_hdr header; 91 struct type50_hdr header;
@@ -100,6 +112,19 @@ struct type50_crb2_msg {
100 unsigned char message[256]; 112 unsigned char message[256];
101} __attribute__((packed)); 113} __attribute__((packed));
102 114
115/* CRT, with a larger modulus */
116struct type50_crb3_msg {
117 struct type50_hdr header;
118 unsigned short keyblock_type; /* 0x0013 */
119 unsigned char reserved[6];
120 unsigned char p[256];
121 unsigned char q[256];
122 unsigned char dp[256];
123 unsigned char dq[256];
124 unsigned char u[256];
125 unsigned char message[512];
126} __attribute__((packed));
127
103/** 128/**
104 * The type 80 response family is associated with a CEX2A card. 129 * The type 80 response family is associated with a CEX2A card.
105 * 130 *
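
A worked sizing example for the new crb3 layout; the short_len/long_len derivation is an assumption (the hunks above elide it), following the ICA convention that p, dp and u carry 8 more bytes than q and dq:

	/* hedged arithmetic sketch: a 4096-bit CRT request on CEX3A */
	static void crb3_sizing_example(void)
	{
		int mod_len     = 512;			/* 4096-bit modulus, bytes */
		int short_len   = mod_len / 2;		/* q, dq: 256 bytes */
		int long_len    = short_len + 8;	/* p, dp, u: 264 bytes */
		int limit       = 256;			/* CEX3A per-field cap */
		int long_offset = long_len > limit ? long_len - limit : 0;

		/* long_offset == 8: the leading zero bytes already verified
		 * by zcrypt_rsa_crt() are skipped, so the remaining 256
		 * bytes fit the p/dp/u members of type50_crb3_msg */
		(void)short_len; (void)long_offset;
	}
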
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 09e934b295a0..1afb69c75fea 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -373,6 +373,7 @@ static int zcrypt_pcica_probe(struct ap_device *ap_dev)
373 zdev->min_mod_size = PCICA_MIN_MOD_SIZE; 373 zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
374 zdev->max_mod_size = PCICA_MAX_MOD_SIZE; 374 zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
375 zdev->speed_rating = PCICA_SPEED_RATING; 375 zdev->speed_rating = PCICA_SPEED_RATING;
376 zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE;
376 ap_dev->reply = &zdev->reply; 377 ap_dev->reply = &zdev->reply;
377 ap_dev->private = zdev; 378 ap_dev->private = zdev;
378 rc = zcrypt_device_register(zdev); 379 rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 9dec5c77cff4..aa4c050a5694 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -579,6 +579,7 @@ static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
579 zdev->min_mod_size = PCICC_MIN_MOD_SIZE; 579 zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
580 zdev->max_mod_size = PCICC_MAX_MOD_SIZE; 580 zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
581 zdev->speed_rating = PCICC_SPEED_RATING; 581 zdev->speed_rating = PCICC_SPEED_RATING;
582 zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE;
582 ap_dev->reply = &zdev->reply; 583 ap_dev->reply = &zdev->reply;
583 ap_dev->private = zdev; 584 ap_dev->private = zdev;
584 rc = zcrypt_device_register(zdev); 585 rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 510fab4577d4..4f85eb725f4f 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -45,12 +45,12 @@
45#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ 45#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
46#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */ 46#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
47#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE 47#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
48#define CEX3C_MAX_MOD_SIZE PCIXCC_MAX_MOD_SIZE 48#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
49 49
50#define PCIXCC_MCL2_SPEED_RATING 7870 50#define PCIXCC_MCL2_SPEED_RATING 7870
51#define PCIXCC_MCL3_SPEED_RATING 7870 51#define PCIXCC_MCL3_SPEED_RATING 7870
52#define CEX2C_SPEED_RATING 7000 52#define CEX2C_SPEED_RATING 7000
53#define CEX3C_SPEED_RATING 6500 /* FIXME: needs finetuning */ 53#define CEX3C_SPEED_RATING 6500
54 54
55#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ 55#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
56#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 56#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
@@ -567,6 +567,15 @@ static int convert_response_ica(struct zcrypt_device *zdev,
567 case TYPE88_RSP_CODE: 567 case TYPE88_RSP_CODE:
568 return convert_error(zdev, reply); 568 return convert_error(zdev, reply);
569 case TYPE86_RSP_CODE: 569 case TYPE86_RSP_CODE:
570 if (msg->cprbx.ccp_rtcode &&
571 (msg->cprbx.ccp_rscode == 0x14f) &&
572 (outputdatalength > 256)) {
573 if (zdev->max_exp_bit_length <= 17) {
574 zdev->max_exp_bit_length = 17;
575 return -EAGAIN;
576 } else
577 return -EINVAL;
578 }
570 if (msg->hdr.reply_code) 579 if (msg->hdr.reply_code)
571 return convert_error(zdev, reply); 580 return convert_error(zdev, reply);
572 if (msg->cprbx.cprb_ver_id == 0x02) 581 if (msg->cprbx.cprb_ver_id == 0x02)
@@ -1052,11 +1061,13 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1052 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING; 1061 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
1053 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 1062 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
1054 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1063 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1064 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1055 } else { 1065 } else {
1056 zdev->type_string = "PCIXCC_MCL3"; 1066 zdev->type_string = "PCIXCC_MCL3";
1057 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING; 1067 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
1058 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 1068 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
1059 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1069 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1070 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1060 } 1071 }
1061 break; 1072 break;
1062 case AP_DEVICE_TYPE_CEX2C: 1073 case AP_DEVICE_TYPE_CEX2C:
@@ -1065,6 +1076,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1065 zdev->speed_rating = CEX2C_SPEED_RATING; 1076 zdev->speed_rating = CEX2C_SPEED_RATING;
1066 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 1077 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
1067 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1078 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1079 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1068 break; 1080 break;
1069 case AP_DEVICE_TYPE_CEX3C: 1081 case AP_DEVICE_TYPE_CEX3C:
1070 zdev->user_space_type = ZCRYPT_CEX3C; 1082 zdev->user_space_type = ZCRYPT_CEX3C;
@@ -1072,6 +1084,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1072 zdev->speed_rating = CEX3C_SPEED_RATING; 1084 zdev->speed_rating = CEX3C_SPEED_RATING;
1073 zdev->min_mod_size = CEX3C_MIN_MOD_SIZE; 1085 zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
1074 zdev->max_mod_size = CEX3C_MAX_MOD_SIZE; 1086 zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
1087 zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
1075 break; 1088 break;
1076 default: 1089 default:
1077 goto out_free; 1090 goto out_free;
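
A hedged restatement of the new TYPE86 branch in convert_response_ica(): reason code 0x14f on a reply larger than 256 bytes is taken (an assumption from context) to mean the coprocessor rejected the public exponent of a >2048-bit request, which it only serves for short exponents such as e = 65537 (17 bits):

	/* hedged sketch mirroring the branch above */
	static long on_rscode_0x14f(struct zcrypt_device *zdev)
	{
		if (zdev->max_exp_bit_length <= 17) {
			zdev->max_exp_bit_length = 17;	/* record the cap */
			return -EAGAIN;	/* repeat request, possibly elsewhere */
		}
		return -EINVAL;		/* exponent cannot be served */
	}
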
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 4e298bc8949d..aec60d55b10d 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -10,6 +10,7 @@
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> 10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */ 11 */
12 12
13#include <linux/kernel_stat.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/bootmem.h> 15#include <linux/bootmem.h>
15#include <linux/err.h> 16#include <linux/err.h>
@@ -24,7 +25,7 @@
24#include <asm/kvm_para.h> 25#include <asm/kvm_para.h>
25#include <asm/kvm_virtio.h> 26#include <asm/kvm_virtio.h>
26#include <asm/setup.h> 27#include <asm/setup.h>
27#include <asm/s390_ext.h> 28#include <asm/irq.h>
28 29
29#define VIRTIO_SUBCODE_64 0x0D00 30#define VIRTIO_SUBCODE_64 0x0D00
30 31
@@ -32,6 +33,7 @@
32 * The pointer to our (page) of device descriptions. 33 * The pointer to our (page) of device descriptions.
33 */ 34 */
34static void *kvm_devices; 35static void *kvm_devices;
36struct work_struct hotplug_work;
35 37
36struct kvm_device { 38struct kvm_device {
37 struct virtio_device vdev; 39 struct virtio_device vdev;
@@ -328,33 +330,86 @@ static void scan_devices(void)
328} 330}
329 331
330/* 332/*
333 * match for a kvm device with a specific desc pointer
334 */
335static int match_desc(struct device *dev, void *data)
336{
337 if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data)
338 return 1;
339
340 return 0;
341}
342
343/*
344 * hotplug_devices() tries to find changes in the device page.
345 */
346static void hotplug_devices(struct work_struct *dummy)
347{
348 unsigned int i;
349 struct kvm_device_desc *d;
350 struct device *dev;
351
352 for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
353 d = kvm_devices + i;
354
355 /* end of list */
356 if (d->type == 0)
357 break;
358
359 /* device already exists */
360 dev = device_find_child(kvm_root, d, match_desc);
361 if (dev) {
362 /* XXX check for hotplug remove */
363 put_device(dev);
364 continue;
365 }
366
367 /* new device */
368 printk(KERN_INFO "Adding new virtio device %p\n", d);
369 add_kvm_device(d, i);
370 }
371}
372
373/*
331 * we emulate the request_irq behaviour on top of s390 extints 374 * we emulate the request_irq behaviour on top of s390 extints
332 */ 375 */
333static void kvm_extint_handler(u16 code) 376static void kvm_extint_handler(unsigned int ext_int_code,
377 unsigned int param32, unsigned long param64)
334{ 378{
335 struct virtqueue *vq; 379 struct virtqueue *vq;
336 u16 subcode; 380 u16 subcode;
337 int config_changed; 381 u32 param;
338 382
339 subcode = S390_lowcore.cpu_addr; 383 subcode = ext_int_code >> 16;
340 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) 384 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
341 return; 385 return;
386 kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
342 387
343 /* The LSB might be overloaded, we have to mask it */ 388 /* The LSB might be overloaded, we have to mask it */
344 vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); 389 vq = (struct virtqueue *)(param64 & ~1UL);
345 390
346 /* We use the LSB of extparam, to decide, if this interrupt is a config 391 /* We use ext_params to decide what this interrupt means */
347 * change or a "standard" interrupt */ 392 param = param32 & VIRTIO_PARAM_MASK;
348 config_changed = S390_lowcore.ext_params & 1;
349 393
350 if (config_changed) { 394 switch (param) {
395 case VIRTIO_PARAM_CONFIG_CHANGED:
396 {
351 struct virtio_driver *drv; 397 struct virtio_driver *drv;
352 drv = container_of(vq->vdev->dev.driver, 398 drv = container_of(vq->vdev->dev.driver,
353 struct virtio_driver, driver); 399 struct virtio_driver, driver);
354 if (drv->config_changed) 400 if (drv->config_changed)
355 drv->config_changed(vq->vdev); 401 drv->config_changed(vq->vdev);
356 } else 402
403 break;
404 }
405 case VIRTIO_PARAM_DEV_ADD:
406 schedule_work(&hotplug_work);
407 break;
408 case VIRTIO_PARAM_VRING_INTERRUPT:
409 default:
357 vring_interrupt(0, vq); 410 vring_interrupt(0, vq);
411 break;
412 }
358} 413}
359 414
360/* 415/*
@@ -383,7 +438,9 @@ static int __init kvm_devices_init(void)
383 438
384 kvm_devices = (void *) real_memory_size; 439 kvm_devices = (void *) real_memory_size;
385 440
386 ctl_set_bit(0, 9); 441 INIT_WORK(&hotplug_work, hotplug_devices);
442
443 service_subclass_irq_register();
387 register_external_interrupt(0x2603, kvm_extint_handler); 444 register_external_interrupt(0x2603, kvm_extint_handler);
388 445
389 scan_devices(); 446 scan_devices();
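
hotplug_devices() above leans on the device_find_child() contract: a successful match returns the child with an extra reference that the caller must drop with put_device(). A hedged usage sketch built on the same match_desc() helper:

	/* hedged sketch: is this descriptor already instantiated? */
	static bool kvm_desc_known(struct device *root, struct kvm_device_desc *d)
	{
		struct device *dev = device_find_child(root, d, match_desc);

		if (!dev)
			return false;
		put_device(dev);	/* drop the reference taken on match */
		return true;
	}
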
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 977bb4d4ed15..fa80ba1f0344 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -2,7 +2,8 @@ menu "S/390 network device drivers"
2 depends on NETDEVICES && S390 2 depends on NETDEVICES && S390
3 3
4config LCS 4config LCS
5 tristate "Lan Channel Station Interface" 5 def_tristate m
6 prompt "Lan Channel Station Interface"
6 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI) 7 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
7 help 8 help
8 Select this option if you want to use LCS networking on IBM System z. 9 Select this option if you want to use LCS networking on IBM System z.
@@ -12,7 +13,8 @@ config LCS
12 If you do not know what it is, it's safe to choose Y. 13 If you do not know what it is, it's safe to choose Y.
13 14
14config CTCM 15config CTCM
15 tristate "CTC and MPC SNA device support" 16 def_tristate m
17 prompt "CTC and MPC SNA device support"
16 depends on CCW && NETDEVICES 18 depends on CCW && NETDEVICES
17 help 19 help
18 Select this option if you want to use channel-to-channel 20 Select this option if you want to use channel-to-channel
@@ -26,7 +28,8 @@ config CTCM
26 If you do not need any channel-to-channel connection, choose N. 28 If you do not need any channel-to-channel connection, choose N.
27 29
28config NETIUCV 30config NETIUCV
29 tristate "IUCV network device support (VM only)" 31 def_tristate m
32 prompt "IUCV network device support (VM only)"
30 depends on IUCV && NETDEVICES 33 depends on IUCV && NETDEVICES
31 help 34 help
32 Select this option if you want to use inter-user communication 35 Select this option if you want to use inter-user communication
@@ -37,14 +40,16 @@ config NETIUCV
37 The module name is netiucv. If unsure, choose Y. 40 The module name is netiucv. If unsure, choose Y.
38 41
39config SMSGIUCV 42config SMSGIUCV
40 tristate "IUCV special message support (VM only)" 43 def_tristate m
44 prompt "IUCV special message support (VM only)"
41 depends on IUCV 45 depends on IUCV
42 help 46 help
43 Select this option if you want to be able to receive SMSG messages 47 Select this option if you want to be able to receive SMSG messages
44 from other VM guest systems. 48 from other VM guest systems.
45 49
46config SMSGIUCV_EVENT 50config SMSGIUCV_EVENT
47 tristate "Deliver IUCV special messages as uevents (VM only)" 51 def_tristate m
52 prompt "Deliver IUCV special messages as uevents (VM only)"
48 depends on SMSGIUCV 53 depends on SMSGIUCV
49 help 54 help
50 Select this option to deliver CP special messages (SMSGs) as 55 Select this option to deliver CP special messages (SMSGs) as
@@ -54,7 +59,8 @@ config SMSGIUCV_EVENT
54 To compile as a module, choose M. The module name is "smsgiucv_app". 59 To compile as a module, choose M. The module name is "smsgiucv_app".
55 60
56config CLAW 61config CLAW
57 tristate "CLAW device support" 62 def_tristate m
63 prompt "CLAW device support"
58 depends on CCW && NETDEVICES 64 depends on CCW && NETDEVICES
59 help 65 help
60 This driver supports channel attached CLAW devices. 66 This driver supports channel attached CLAW devices.
@@ -64,7 +70,8 @@ config CLAW
64 To compile into the kernel, choose Y. 70 To compile into the kernel, choose Y.
65 71
66config QETH 72config QETH
67 tristate "Gigabit Ethernet device support" 73 def_tristate y
74 prompt "Gigabit Ethernet device support"
68 depends on CCW && NETDEVICES && IP_MULTICAST && QDIO 75 depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
69 help 76 help
70 This driver supports the IBM System z OSA Express adapters 77 This driver supports the IBM System z OSA Express adapters
@@ -78,28 +85,28 @@ config QETH
78 The module name is qeth. 85 The module name is qeth.
79 86
80config QETH_L2 87config QETH_L2
81 tristate "qeth layer 2 device support" 88 def_tristate y
82 depends on QETH 89 prompt "qeth layer 2 device support"
83 help 90 depends on QETH
84 Select this option to be able to run qeth devices in layer 2 mode. 91 help
85 To compile as a module, choose M. The module name is qeth_l2. 92 Select this option to be able to run qeth devices in layer 2 mode.
86 If unsure, choose y. 93 To compile as a module, choose M. The module name is qeth_l2.
94 If unsure, choose y.
87 95
88config QETH_L3 96config QETH_L3
89 tristate "qeth layer 3 device support" 97 def_tristate y
90 depends on QETH 98 prompt "qeth layer 3 device support"
91 help 99 depends on QETH
92 Select this option to be able to run qeth devices in layer 3 mode. 100 help
93 To compile as a module choose M. The module name is qeth_l3. 101 Select this option to be able to run qeth devices in layer 3 mode.
94 If unsure, choose Y. 102 To compile as a module choose M. The module name is qeth_l3.
103 If unsure, choose Y.
95 104
96config QETH_IPV6 105config QETH_IPV6
97 bool 106 def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
98 depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
99 default y
100 107
101config CCWGROUP 108config CCWGROUP
102 tristate 109 tristate
103 default (LCS || CTCM || QETH) 110 default (LCS || CTCM || QETH || CLAW)
104 111
105endmenu 112endmenu
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 8e4153d740f3..f1fa2483ae6b 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -63,6 +63,7 @@
63 63
64#define KMSG_COMPONENT "claw" 64#define KMSG_COMPONENT "claw"
65 65
66#include <linux/kernel_stat.h>
66#include <asm/ccwdev.h> 67#include <asm/ccwdev.h>
67#include <asm/ccwgroup.h> 68#include <asm/ccwgroup.h>
68#include <asm/debug.h> 69#include <asm/debug.h>
@@ -263,8 +264,10 @@ static struct device *claw_root_dev;
263/* ccwgroup table */ 264/* ccwgroup table */
264 265
265static struct ccwgroup_driver claw_group_driver = { 266static struct ccwgroup_driver claw_group_driver = {
266 .owner = THIS_MODULE, 267 .driver = {
267 .name = "claw", 268 .owner = THIS_MODULE,
269 .name = "claw",
270 },
268 .max_slaves = 2, 271 .max_slaves = 2,
269 .driver_id = 0xC3D3C1E6, 272 .driver_id = 0xC3D3C1E6,
270 .probe = claw_probe, 273 .probe = claw_probe,
@@ -281,8 +284,10 @@ static struct ccw_device_id claw_ids[] = {
281MODULE_DEVICE_TABLE(ccw, claw_ids); 284MODULE_DEVICE_TABLE(ccw, claw_ids);
282 285
283static struct ccw_driver claw_ccw_driver = { 286static struct ccw_driver claw_ccw_driver = {
284 .owner = THIS_MODULE, 287 .driver = {
285 .name = "claw", 288 .owner = THIS_MODULE,
289 .name = "claw",
290 },
286 .ids = claw_ids, 291 .ids = claw_ids,
287 .probe = ccwgroup_probe_ccwdev, 292 .probe = ccwgroup_probe_ccwdev,
288 .remove = ccwgroup_remove_ccwdev, 293 .remove = ccwgroup_remove_ccwdev,
@@ -640,6 +645,7 @@ claw_irq_handler(struct ccw_device *cdev,
640 struct claw_env *p_env; 645 struct claw_env *p_env;
641 struct chbk *p_ch_r=NULL; 646 struct chbk *p_ch_r=NULL;
642 647
648 kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
643 CLAW_DBF_TEXT(4, trace, "clawirq"); 649 CLAW_DBF_TEXT(4, trace, "clawirq");
644 /* Bypass all 'unsolicited interrupts' */ 650 /* Bypass all 'unsolicited interrupts' */
645 privptr = dev_get_drvdata(&cdev->dev); 651 privptr = dev_get_drvdata(&cdev->dev);
@@ -773,7 +779,7 @@ claw_irq_handler(struct ccw_device *cdev,
773 case CLAW_START_WRITE: 779 case CLAW_START_WRITE:
774 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { 780 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
775 dev_info(&cdev->dev, 781 dev_info(&cdev->dev,
776 "%s: Unit Check Occured in " 782 "%s: Unit Check Occurred in "
777 "write channel\n", dev->name); 783 "write channel\n", dev->name);
778 clear_bit(0, (void *)&p_ch->IO_active); 784 clear_bit(0, (void *)&p_ch->IO_active);
779 if (p_ch->irb->ecw[0] & 0x80) { 785 if (p_ch->irb->ecw[0] & 0x80) {
@@ -839,12 +845,10 @@ claw_irq_tasklet ( unsigned long data )
839{ 845{
840 struct chbk * p_ch; 846 struct chbk * p_ch;
841 struct net_device *dev; 847 struct net_device *dev;
842 struct claw_privbk * privptr;
843 848
844 p_ch = (struct chbk *) data; 849 p_ch = (struct chbk *) data;
845 dev = (struct net_device *)p_ch->ndev; 850 dev = (struct net_device *)p_ch->ndev;
846 CLAW_DBF_TEXT(4, trace, "IRQtask"); 851 CLAW_DBF_TEXT(4, trace, "IRQtask");
847 privptr = (struct claw_privbk *)dev->ml_priv;
848 unpack_read(dev); 852 unpack_read(dev);
849 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); 853 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
850 CLAW_DBF_TEXT(4, trace, "TskletXt"); 854 CLAW_DBF_TEXT(4, trace, "TskletXt");
@@ -1020,7 +1024,6 @@ claw_write_next ( struct chbk * p_ch )
1020 struct net_device *dev; 1024 struct net_device *dev;
1021 struct claw_privbk *privptr=NULL; 1025 struct claw_privbk *privptr=NULL;
1022 struct sk_buff *pk_skb; 1026 struct sk_buff *pk_skb;
1023 int rc;
1024 1027
1025 CLAW_DBF_TEXT(4, trace, "claw_wrt"); 1028 CLAW_DBF_TEXT(4, trace, "claw_wrt");
1026 if (p_ch->claw_state == CLAW_STOP) 1029 if (p_ch->claw_state == CLAW_STOP)
@@ -1032,7 +1035,7 @@ claw_write_next ( struct chbk * p_ch )
1032 !skb_queue_empty(&p_ch->collect_queue)) { 1035 !skb_queue_empty(&p_ch->collect_queue)) {
1033 pk_skb = claw_pack_skb(privptr); 1036 pk_skb = claw_pack_skb(privptr);
1034 while (pk_skb != NULL) { 1037 while (pk_skb != NULL) {
1035 rc = claw_hw_tx( pk_skb, dev,1); 1038 claw_hw_tx(pk_skb, dev, 1);
1036 if (privptr->write_free_count > 0) { 1039 if (privptr->write_free_count > 0) {
1037 pk_skb = claw_pack_skb(privptr); 1040 pk_skb = claw_pack_skb(privptr);
1038 } else 1041 } else
@@ -1316,15 +1319,12 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1316 unsigned char *pDataAddress; 1319 unsigned char *pDataAddress;
1317 struct endccw *pEnd; 1320 struct endccw *pEnd;
1318 struct ccw1 tempCCW; 1321 struct ccw1 tempCCW;
1319 struct chbk *p_ch;
1320 struct claw_env *p_env; 1322 struct claw_env *p_env;
1321 int lock;
1322 struct clawph *pk_head; 1323 struct clawph *pk_head;
1323 struct chbk *ch; 1324 struct chbk *ch;
1324 1325
1325 CLAW_DBF_TEXT(4, trace, "hw_tx"); 1326 CLAW_DBF_TEXT(4, trace, "hw_tx");
1326 privptr = (struct claw_privbk *)(dev->ml_priv); 1327 privptr = (struct claw_privbk *)(dev->ml_priv);
1327 p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
1328 p_env =privptr->p_env; 1328 p_env =privptr->p_env;
1329 claw_free_wrt_buf(dev); /* Clean up free chain if possible */ 1329 claw_free_wrt_buf(dev); /* Clean up free chain if possible */
1330 /* scan the write queue to free any completed write packets */ 1330 /* scan the write queue to free any completed write packets */
@@ -1505,12 +1505,6 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1505 1505
1506 } /* endif (p_first_ccw!=NULL) */ 1506 } /* endif (p_first_ccw!=NULL) */
1507 dev_kfree_skb_any(skb); 1507 dev_kfree_skb_any(skb);
1508 if (linkid==0) {
1509 lock=LOCK_NO;
1510 }
1511 else {
1512 lock=LOCK_YES;
1513 }
1514 claw_strt_out_IO(dev ); 1508 claw_strt_out_IO(dev );
1515 /* if write free count is zero , set NOBUFFER */ 1509 /* if write free count is zero , set NOBUFFER */
1516 if (privptr->write_free_count==0) { 1510 if (privptr->write_free_count==0) {
@@ -2815,15 +2809,11 @@ claw_free_wrt_buf( struct net_device *dev )
2815{ 2809{
2816 2810
2817 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv; 2811 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2818 struct ccwbk*p_first_ccw;
2819 struct ccwbk*p_last_ccw;
2820 struct ccwbk*p_this_ccw; 2812 struct ccwbk*p_this_ccw;
2821 struct ccwbk*p_next_ccw; 2813 struct ccwbk*p_next_ccw;
2822 2814
2823 CLAW_DBF_TEXT(4, trace, "freewrtb"); 2815 CLAW_DBF_TEXT(4, trace, "freewrtb");
2824 /* scan the write queue to free any completed write packets */ 2816 /* scan the write queue to free any completed write packets */
2825 p_first_ccw=NULL;
2826 p_last_ccw=NULL;
2827 p_this_ccw=privptr->p_write_active_first; 2817 p_this_ccw=privptr->p_write_active_first;
2828 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) 2818 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2829 { 2819 {
@@ -3066,7 +3056,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
3066{ 3056{
3067 struct claw_privbk *priv; 3057 struct claw_privbk *priv;
3068 struct net_device *ndev; 3058 struct net_device *ndev;
3069 int ret; 3059 int ret = 0;
3070 3060
3071 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); 3061 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3072 priv = dev_get_drvdata(&cgdev->dev); 3062 priv = dev_get_drvdata(&cgdev->dev);
@@ -3089,7 +3079,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
3089 } 3079 }
3090 ccw_device_set_offline(cgdev->cdev[1]); 3080 ccw_device_set_offline(cgdev->cdev[1]);
3091 ccw_device_set_offline(cgdev->cdev[0]); 3081 ccw_device_set_offline(cgdev->cdev[0]);
3092 return 0; 3082 return ret;
3093} 3083}
3094 3084
3095static void 3085static void
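
The claw hunks also carry the tree-wide conversion of ccw and ccwgroup drivers from open-coded owner/name members to an embedded struct device_driver. A hedged skeleton of the converted shape (claw-specific callbacks elided):

	/* hedged sketch: driver identity now lives in the embedded
	 * struct device_driver rather than in the bus driver itself */
	static struct ccw_driver example_ccw_driver = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "example",
		},
		/* .ids, .probe, .remove unchanged */
	};
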
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 8c921fc3511a..2d602207541b 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -184,7 +184,7 @@ static void ctcmpc_chx_resend(fsm_instance *, int, void *);
184static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); 184static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
185 185
186/** 186/**
187 * Check return code of a preceeding ccw_device call, halt_IO etc... 187 * Check return code of a preceding ccw_device call, halt_IO etc...
188 * 188 *
189 * ch : The channel, the error belongs to. 189 * ch : The channel, the error belongs to.
190 * Returns the error code (!= 0) to inspect. 190 * Returns the error code (!= 0) to inspect.
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 2c7d2d9be4d0..426787efc492 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -24,6 +24,7 @@
24#define KMSG_COMPONENT "ctcm" 24#define KMSG_COMPONENT "ctcm"
25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
26 26
27#include <linux/kernel_stat.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/init.h> 29#include <linux/init.h>
29#include <linux/kernel.h> 30#include <linux/kernel.h>
@@ -671,7 +672,6 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
671 int ccw_idx; 672 int ccw_idx;
672 unsigned long hi; 673 unsigned long hi;
673 unsigned long saveflags = 0; /* avoids compiler warning */ 674 unsigned long saveflags = 0; /* avoids compiler warning */
674 __u16 block_len;
675 675
676 CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n", 676 CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
677 __func__, dev->name, smp_processor_id(), ch, 677 __func__, dev->name, smp_processor_id(), ch,
@@ -718,7 +718,6 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
718 */ 718 */
719 atomic_inc(&skb->users); 719 atomic_inc(&skb->users);
720 720
721 block_len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
722 /* 721 /*
723 * IDAL support in CTCM is broken, so we have to 722 * IDAL support in CTCM is broken, so we have to
724 * care about skb's above 2G ourselves. 723 * care about skb's above 2G ourselves.
@@ -1204,6 +1203,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1204 int cstat; 1203 int cstat;
1205 int dstat; 1204 int dstat;
1206 1205
1206 kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++;
1207 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, 1207 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1208 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev)); 1208 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
1209 1209
@@ -1762,16 +1762,20 @@ static struct ccw_device_id ctcm_ids[] = {
1762MODULE_DEVICE_TABLE(ccw, ctcm_ids); 1762MODULE_DEVICE_TABLE(ccw, ctcm_ids);
1763 1763
1764static struct ccw_driver ctcm_ccw_driver = { 1764static struct ccw_driver ctcm_ccw_driver = {
1765 .owner = THIS_MODULE, 1765 .driver = {
1766 .name = "ctcm", 1766 .owner = THIS_MODULE,
1767 .name = "ctcm",
1768 },
1767 .ids = ctcm_ids, 1769 .ids = ctcm_ids,
1768 .probe = ccwgroup_probe_ccwdev, 1770 .probe = ccwgroup_probe_ccwdev,
1769 .remove = ccwgroup_remove_ccwdev, 1771 .remove = ccwgroup_remove_ccwdev,
1770}; 1772};
1771 1773
1772static struct ccwgroup_driver ctcm_group_driver = { 1774static struct ccwgroup_driver ctcm_group_driver = {
1773 .owner = THIS_MODULE, 1775 .driver = {
1774 .name = CTC_DRIVER_NAME, 1776 .owner = THIS_MODULE,
1777 .name = CTC_DRIVER_NAME,
1778 },
1775 .max_slaves = 2, 1779 .max_slaves = 2,
1776 .driver_id = 0xC3E3C3D4, /* CTCM */ 1780 .driver_id = 0xC3E3C3D4, /* CTCM */
1777 .probe = ctcm_probe_device, 1781 .probe = ctcm_probe_device,
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 2861e78773cb..da4c747335e7 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -540,7 +540,7 @@ void ctc_mpc_dealloc_ch(int port_num)
540 540
541 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, 541 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
542 "%s: %s: refcount = %d\n", 542 "%s: %s: refcount = %d\n",
543 CTCM_FUNTAIL, dev->name, atomic_read(&dev->refcnt)); 543 CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));
544 544
545 fsm_deltimer(&priv->restart_timer); 545 fsm_deltimer(&priv->restart_timer);
546 grp->channels_terminating = 0; 546 grp->channels_terminating = 0;
@@ -653,7 +653,6 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
653 struct net_device *dev = rch->netdev; 653 struct net_device *dev = rch->netdev;
654 struct ctcm_priv *priv = dev->ml_priv; 654 struct ctcm_priv *priv = dev->ml_priv;
655 struct mpc_group *grp = priv->mpcg; 655 struct mpc_group *grp = priv->mpcg;
656 int rc = 0;
657 struct th_sweep *header; 656 struct th_sweep *header;
658 struct sk_buff *sweep_skb; 657 struct sk_buff *sweep_skb;
659 struct channel *ch = priv->channel[CTCM_WRITE]; 658 struct channel *ch = priv->channel[CTCM_WRITE];
@@ -665,16 +664,14 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
665 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, 664 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
666 "%s(%s): sweep_skb allocation ERROR\n", 665 "%s(%s): sweep_skb allocation ERROR\n",
667 CTCM_FUNTAIL, rch->id); 666 CTCM_FUNTAIL, rch->id);
668 rc = -ENOMEM; 667 goto done;
669 goto done;
670 } 668 }
671 669
672 header = kmalloc(sizeof(struct th_sweep), gfp_type()); 670 header = kmalloc(sizeof(struct th_sweep), gfp_type());
673 671
674 if (!header) { 672 if (!header) {
675 dev_kfree_skb_any(sweep_skb); 673 dev_kfree_skb_any(sweep_skb);
676 rc = -ENOMEM; 674 goto done;
677 goto done;
678 } 675 }
679 676
680 header->th.th_seg = 0x00 ; 677 header->th.th_seg = 0x00 ;
@@ -1370,8 +1367,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1370 struct net_device *dev = arg; 1367 struct net_device *dev = arg;
1371 struct ctcm_priv *priv; 1368 struct ctcm_priv *priv;
1372 struct mpc_group *grp; 1369 struct mpc_group *grp;
1373 int rc = 0; 1370 struct channel *wch;
1374 struct channel *wch, *rch;
1375 1371
1376 BUG_ON(dev == NULL); 1372 BUG_ON(dev == NULL);
1377 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); 1373 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
@@ -1396,7 +1392,6 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1396 fsm_deltimer(&priv->restart_timer); 1392 fsm_deltimer(&priv->restart_timer);
1397 1393
1398 wch = priv->channel[CTCM_WRITE]; 1394 wch = priv->channel[CTCM_WRITE];
1399 rch = priv->channel[CTCM_READ];
1400 1395
1401 switch (grp->saved_state) { 1396 switch (grp->saved_state) {
1402 case MPCG_STATE_RESET: 1397 case MPCG_STATE_RESET:
@@ -1435,7 +1430,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1435 1430
1436 if (grp->send_qllc_disc == 1) { 1431 if (grp->send_qllc_disc == 1) {
1437 grp->send_qllc_disc = 0; 1432 grp->send_qllc_disc = 0;
1438 rc = mpc_send_qllc_discontact(dev); 1433 mpc_send_qllc_discontact(dev);
1439 } 1434 }
1440 1435
1441 /* DO NOT issue DEV_EVENT_STOP directly out of this code */ 1436 /* DO NOT issue DEV_EVENT_STOP directly out of this code */
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0f19d540b655..c3b8064a102d 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -26,6 +26,7 @@
26#define KMSG_COMPONENT "lcs" 26#define KMSG_COMPONENT "lcs"
27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 28
29#include <linux/kernel_stat.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/if.h> 31#include <linux/if.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
@@ -840,7 +841,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
840} 841}
841 842
842/** 843/**
843 * Emit buffer of a lan comand. 844 * Emit buffer of a lan command.
844 */ 845 */
845static void 846static void
846lcs_lancmd_timeout(unsigned long data) 847lcs_lancmd_timeout(unsigned long data)
@@ -1122,7 +1123,7 @@ list_modified:
1122 list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){ 1123 list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
1123 switch (ipm->ipm_state) { 1124 switch (ipm->ipm_state) {
1124 case LCS_IPM_STATE_SET_REQUIRED: 1125 case LCS_IPM_STATE_SET_REQUIRED:
1125 /* del from ipm_list so noone else can tamper with 1126 /* del from ipm_list so no one else can tamper with
1126 * this entry */ 1127 * this entry */
1127 list_del_init(&ipm->list); 1128 list_del_init(&ipm->list);
1128 spin_unlock_irqrestore(&card->ipm_lock, flags); 1129 spin_unlock_irqrestore(&card->ipm_lock, flags);
@@ -1188,7 +1189,8 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1188 spin_lock_irqsave(&card->ipm_lock, flags); 1189 spin_lock_irqsave(&card->ipm_lock, flags);
1189 list_for_each(l, &card->ipm_list) { 1190 list_for_each(l, &card->ipm_list) {
1190 ipm = list_entry(l, struct lcs_ipm_list, list); 1191 ipm = list_entry(l, struct lcs_ipm_list, list);
1191 for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) { 1192 for (im4 = rcu_dereference(in4_dev->mc_list);
1193 im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
1192 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); 1194 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1193 if ( (ipm->ipm.ip_addr == im4->multiaddr) && 1195 if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
1194 (memcmp(buf, &ipm->ipm.mac_addr, 1196 (memcmp(buf, &ipm->ipm.mac_addr,
@@ -1233,7 +1235,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1233 unsigned long flags; 1235 unsigned long flags;
1234 1236
1235 LCS_DBF_TEXT(4, trace, "setmclst"); 1237 LCS_DBF_TEXT(4, trace, "setmclst");
1236 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { 1238 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
1239 im4 = rcu_dereference(im4->next_rcu)) {
1237 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); 1240 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1238 ipm = lcs_check_addr_entry(card, im4, buf); 1241 ipm = lcs_check_addr_entry(card, im4, buf);
1239 if (ipm != NULL) 1242 if (ipm != NULL)
@@ -1269,10 +1272,10 @@ lcs_register_mc_addresses(void *data)
1269 in4_dev = in_dev_get(card->dev); 1272 in4_dev = in_dev_get(card->dev);
1270 if (in4_dev == NULL) 1273 if (in4_dev == NULL)
1271 goto out; 1274 goto out;
1272 read_lock(&in4_dev->mc_list_lock); 1275 rcu_read_lock();
1273 lcs_remove_mc_addresses(card,in4_dev); 1276 lcs_remove_mc_addresses(card,in4_dev);
1274 lcs_set_mc_addresses(card, in4_dev); 1277 lcs_set_mc_addresses(card, in4_dev);
1275 read_unlock(&in4_dev->mc_list_lock); 1278 rcu_read_unlock();
1276 in_dev_put(in4_dev); 1279 in_dev_put(in4_dev);
1277 1280
1278 netif_carrier_off(card->dev); 1281 netif_carrier_off(card->dev);
@@ -1396,6 +1399,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1396 int rc, index; 1399 int rc, index;
1397 int cstat, dstat; 1400 int cstat, dstat;
1398 1401
1402 kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
1399 if (lcs_check_irb_error(cdev, irb)) 1403 if (lcs_check_irb_error(cdev, irb))
1400 return; 1404 return;
1401 1405
@@ -1479,7 +1483,6 @@ lcs_tasklet(unsigned long data)
1479 struct lcs_channel *channel; 1483 struct lcs_channel *channel;
1480 struct lcs_buffer *iob; 1484 struct lcs_buffer *iob;
1481 int buf_idx; 1485 int buf_idx;
1482 int rc;
1483 1486
1484 channel = (struct lcs_channel *) data; 1487 channel = (struct lcs_channel *) data;
1485 LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev)); 1488 LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
@@ -1496,14 +1499,11 @@ lcs_tasklet(unsigned long data)
1496 channel->buf_idx = buf_idx; 1499 channel->buf_idx = buf_idx;
1497 1500
1498 if (channel->state == LCS_CH_STATE_STOPPED) 1501 if (channel->state == LCS_CH_STATE_STOPPED)
1499 // FIXME: what if rc != 0 ?? 1502 lcs_start_channel(channel);
1500 rc = lcs_start_channel(channel);
1501 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1503 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1502 if (channel->state == LCS_CH_STATE_SUSPENDED && 1504 if (channel->state == LCS_CH_STATE_SUSPENDED &&
1503 channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) { 1505 channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
1504 // FIXME: what if rc != 0 ?? 1506 __lcs_resume_channel(channel);
1505 rc = __lcs_resume_channel(channel);
1506 }
1507 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 1507 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1508 1508
1509 /* Something happened on the channel. Wake up waiters. */ 1509 /* Something happened on the channel. Wake up waiters. */
@@ -2392,8 +2392,10 @@ static struct ccw_device_id lcs_ids[] = {
2392MODULE_DEVICE_TABLE(ccw, lcs_ids); 2392MODULE_DEVICE_TABLE(ccw, lcs_ids);
2393 2393
2394static struct ccw_driver lcs_ccw_driver = { 2394static struct ccw_driver lcs_ccw_driver = {
2395 .owner = THIS_MODULE, 2395 .driver = {
2396 .name = "lcs", 2396 .owner = THIS_MODULE,
2397 .name = "lcs",
2398 },
2397 .ids = lcs_ids, 2399 .ids = lcs_ids,
2398 .probe = ccwgroup_probe_ccwdev, 2400 .probe = ccwgroup_probe_ccwdev,
2399 .remove = ccwgroup_remove_ccwdev, 2401 .remove = ccwgroup_remove_ccwdev,
@@ -2403,8 +2405,10 @@ static struct ccw_driver lcs_ccw_driver = {
2403 * LCS ccwgroup driver registration 2405 * LCS ccwgroup driver registration
2404 */ 2406 */
2405static struct ccwgroup_driver lcs_group_driver = { 2407static struct ccwgroup_driver lcs_group_driver = {
2406 .owner = THIS_MODULE, 2408 .driver = {
2407 .name = "lcs", 2409 .owner = THIS_MODULE,
2410 .name = "lcs",
2411 },
2408 .max_slaves = 2, 2412 .max_slaves = 2,
2409 .driver_id = 0xD3C3E2, 2413 .driver_id = 0xD3C3E2,
2410 .probe = lcs_probe_device, 2414 .probe = lcs_probe_device,
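
lcs now walks the IPv4 multicast list under RCU instead of the removed mc_list_lock reader lock; rcu_dereference() is required on every pointer fetch. The pattern as a hedged standalone sketch (types from <linux/inetdevice.h> and <linux/igmp.h>):

	/* hedged sketch of the RCU-protected mc_list walk used above */
	static void walk_mc_list(struct in_device *in4_dev)
	{
		struct ip_mc_list *im4;

		rcu_read_lock();
		for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
		     im4 = rcu_dereference(im4->next_rcu))
			;	/* inspect im4->multiaddr here */
		rcu_read_unlock();
	}
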
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 65ebee0a3266..3251333a23df 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -565,7 +565,7 @@ static int netiucv_callback_connreq(struct iucv_path *path,
565 struct iucv_event ev; 565 struct iucv_event ev;
566 int rc; 566 int rc;
567 567
568 if (memcmp(iucvMagic, ipuser, sizeof(ipuser))) 568 if (memcmp(iucvMagic, ipuser, 16))
569 /* ipuser must match iucvMagic. */ 569 /* ipuser must match iucvMagic. */
570 return -EINVAL; 570 return -EINVAL;
571 rc = -EINVAL; 571 rc = -EINVAL;
@@ -1994,8 +1994,6 @@ static struct net_device *netiucv_init_netdevice(char *username)
1994 netiucv_setup_netdevice); 1994 netiucv_setup_netdevice);
1995 if (!dev) 1995 if (!dev)
1996 return NULL; 1996 return NULL;
1997 if (dev_alloc_name(dev, dev->name) < 0)
1998 goto out_netdev;
1999 1997
2000 privptr = netdev_priv(dev); 1998 privptr = netdev_priv(dev);
2001 privptr->fsm = init_fsm("netiucvdev", dev_state_names, 1999 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
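
The memcmp() length fix above is worth spelling out: in a parameter list an array declarator decays to a pointer, so the old sizeof(ipuser) evaluated to the pointer size (8 on 64-bit s390), silently comparing only half of the 16-byte iucvMagic. A hedged illustration:

	/* hedged illustration of the decay bug fixed above */
	static int magic_matches(const u8 *magic, u8 ipuser[16])
	{
		/* here sizeof(ipuser) == sizeof(u8 *), NOT 16 */
		return memcmp(magic, ipuser, 16) == 0;	/* length explicit */
	}
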
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d1257768be90..d3cee33e554c 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -225,7 +225,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
225/*****************************************************************************/ 225/*****************************************************************************/
226#define QETH_MAX_QUEUES 4 226#define QETH_MAX_QUEUES 4
227#define QETH_IN_BUF_SIZE_DEFAULT 65536 227#define QETH_IN_BUF_SIZE_DEFAULT 65536
228#define QETH_IN_BUF_COUNT_DEFAULT 16 228#define QETH_IN_BUF_COUNT_DEFAULT 64
229#define QETH_IN_BUF_COUNT_HSDEFAULT 128
229#define QETH_IN_BUF_COUNT_MIN 8 230#define QETH_IN_BUF_COUNT_MIN 8
230#define QETH_IN_BUF_COUNT_MAX 128 231#define QETH_IN_BUF_COUNT_MAX 128
231#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) 232#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
@@ -360,7 +361,7 @@ enum qeth_header_ids {
360 361
361static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 362static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
362{ 363{
363 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY); 364 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
364} 365}
365 366
366enum qeth_qdio_buffer_states { 367enum qeth_qdio_buffer_states {
@@ -406,12 +407,6 @@ struct qeth_qdio_q {
406 int next_buf_to_init; 407 int next_buf_to_init;
407} __attribute__ ((aligned(256))); 408} __attribute__ ((aligned(256)));
408 409
409/* possible types of qeth large_send support */
410enum qeth_large_send_types {
411 QETH_LARGE_SEND_NO,
412 QETH_LARGE_SEND_TSO,
413};
414
415struct qeth_qdio_out_buffer { 410struct qeth_qdio_out_buffer {
416 struct qdio_buffer *buffer; 411 struct qdio_buffer *buffer;
417 atomic_t state; 412 atomic_t state;
@@ -440,7 +435,6 @@ struct qeth_qdio_out_q {
440 * index of buffer to be filled by driver; state EMPTY or PACKING 435 * index of buffer to be filled by driver; state EMPTY or PACKING
441 */ 436 */
442 int next_buf_to_fill; 437 int next_buf_to_fill;
443 int sync_iqdio_error;
444 /* 438 /*
445 * number of buffers that are currently filled (PRIMED) 439 * number of buffers that are currently filled (PRIMED)
446 * -> these buffers are hardware-owned 440 * -> these buffers are hardware-owned
@@ -637,6 +631,8 @@ struct qeth_card_info {
637 __u32 csum_mask; 631 __u32 csum_mask;
638 __u32 tx_csum_mask; 632 __u32 tx_csum_mask;
639 enum qeth_ipa_promisc_modes promisc_mode; 633 enum qeth_ipa_promisc_modes promisc_mode;
634 __u32 diagass_support;
635 __u32 hwtrap;
640}; 636};
641 637
642struct qeth_card_options { 638struct qeth_card_options {
@@ -645,13 +641,11 @@ struct qeth_card_options {
645 struct qeth_ipa_info adp; /*Adapter parameters*/ 641 struct qeth_ipa_info adp; /*Adapter parameters*/
646 struct qeth_routing_info route6; 642 struct qeth_routing_info route6;
647 struct qeth_ipa_info ipa6; 643 struct qeth_ipa_info ipa6;
648 enum qeth_checksum_types checksum_type;
649 int broadcast_mode; 644 int broadcast_mode;
650 int macaddr_mode; 645 int macaddr_mode;
651 int fake_broadcast; 646 int fake_broadcast;
652 int add_hhlen; 647 int add_hhlen;
653 int layer2; 648 int layer2;
654 enum qeth_large_send_types large_send;
655 int performance_stats; 649 int performance_stats;
656 int rx_sg_cb; 650 int rx_sg_cb;
657 enum qeth_ipa_isolation_modes isolation; 651 enum qeth_ipa_isolation_modes isolation;
@@ -676,6 +670,7 @@ enum qeth_discipline_id {
676}; 670};
677 671
678struct qeth_discipline { 672struct qeth_discipline {
673 void (*start_poll)(struct ccw_device *, int, unsigned long);
679 qdio_handler_t *input_handler; 674 qdio_handler_t *input_handler;
680 qdio_handler_t *output_handler; 675 qdio_handler_t *output_handler;
681 int (*recover)(void *ptr); 676 int (*recover)(void *ptr);
@@ -694,13 +689,15 @@ struct qeth_mc_mac {
694 int is_vmac; 689 int is_vmac;
695}; 690};
696 691
697struct qeth_skb_data { 692struct qeth_rx {
698 __u32 magic; 693 int b_count;
699 int count; 694 int b_index;
695 struct qdio_buffer_element *b_element;
696 int e_offset;
697 int qdio_err;
700}; 698};
701 699
702#define QETH_SKB_MAGIC 0x71657468 700#define QETH_NAPI_WEIGHT 128
703#define QETH_SIGA_CC2_RETRIES 3
704 701
705struct qeth_card { 702struct qeth_card {
706 struct list_head list; 703 struct list_head list;
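struct qeth_rx keeps the inbound scan position (buffer count and index, current element, offset into it, pending qdio error) across poll invocations, so a poll round can stop mid-buffer when the budget is exhausted and resume exactly there on the next one. QETH_NAPI_WEIGHT is the budget requested at registration; a sketch of how the new napi member would typically be wired up (the registration site itself is not shown in this excerpt):

        /* at net_device setup, hypothetical location */
        netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);

        /* around open/stop */
        napi_enable(&card->napi);
        ...
        napi_disable(&card->napi);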
@@ -739,7 +736,6 @@ struct qeth_card {
739 /* QDIO buffer handling */ 736 /* QDIO buffer handling */
740 struct qeth_qdio_info qdio; 737 struct qeth_qdio_info qdio;
741 struct qeth_perf_stats perf_stats; 738 struct qeth_perf_stats perf_stats;
742 int use_hard_stop;
743 int read_or_write_problem; 739 int read_or_write_problem;
744 struct qeth_osn_info osn_info; 740 struct qeth_osn_info osn_info;
745 struct qeth_discipline discipline; 741 struct qeth_discipline discipline;
@@ -749,6 +745,8 @@ struct qeth_card {
749 debug_info_t *debug; 745 debug_info_t *debug;
750 struct mutex conf_mutex; 746 struct mutex conf_mutex;
751 struct mutex discipline_mutex; 747 struct mutex discipline_mutex;
748 struct napi_struct napi;
749 struct qeth_rx rx;
752}; 750};
753 751
754struct qeth_card_list_struct { 752struct qeth_card_list_struct {
@@ -756,6 +754,14 @@ struct qeth_card_list_struct {
756 rwlock_t rwlock; 754 rwlock_t rwlock;
757}; 755};
758 756
757struct qeth_trap_id {
758 __u16 lparnr;
759 char vmname[8];
760 __u8 chpid;
761 __u8 ssid;
762 __u16 devno;
763} __packed;
764
759/*some helper functions*/ 765/*some helper functions*/
760#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") 766#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
761 767
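struct qeth_trap_id is copied verbatim into the diagnostic-trap command payload, hence __packed; the fields add up to 14 bytes (2 + 8 + 1 + 1 + 2). A compile-time guard for that expectation could look like this (a sketch, not part of the patch):

        BUILD_BUG_ON(sizeof(struct qeth_trap_id) != 14);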
@@ -790,6 +796,12 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
790 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); 796 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
791} 797}
792 798
799static inline int qeth_is_diagass_supported(struct qeth_card *card,
800 enum qeth_diags_cmds cmd)
801{
802 return card->info.diagass_support & (__u32)cmd;
803}
804
793extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 805extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
794extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 806extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
795const char *qeth_get_cardname_short(struct qeth_card *); 807const char *qeth_get_cardname_short(struct qeth_card *);
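qeth_is_diagass_supported() is a plain bitmask test: the SET_DIAG_ASS query callback added below stores the adapter's supported sub-commands in card->info.diagass_support, and the qeth_diags_cmds enum values double as mask bits. The typical caller pattern, mirroring the sysfs handler later in this patch:

        if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP))
                rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
        else
                rc = -EINVAL;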
@@ -831,6 +843,10 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
831 struct qdio_buffer *, struct qdio_buffer_element **, int *, 843 struct qdio_buffer *, struct qdio_buffer_element **, int *,
832 struct qeth_hdr **); 844 struct qeth_hdr **);
833void qeth_schedule_recovery(struct qeth_card *); 845void qeth_schedule_recovery(struct qeth_card *);
846void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
847void qeth_qdio_input_handler(struct ccw_device *,
848 unsigned int, unsigned int, int,
849 int, unsigned long);
834void qeth_qdio_output_handler(struct ccw_device *, unsigned int, 850void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
835 int, int, int, unsigned long); 851 int, int, int, unsigned long);
836void qeth_clear_ipacmd_list(struct qeth_card *); 852void qeth_clear_ipacmd_list(struct qeth_card *);
@@ -871,6 +887,8 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
871int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); 887int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
872int qeth_set_access_ctrl_online(struct qeth_card *card); 888int qeth_set_access_ctrl_online(struct qeth_card *card);
873int qeth_hdr_chk_and_bounce(struct sk_buff *, int); 889int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
890int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
891int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
874 892
875/* exports for OSN */ 893/* exports for OSN */
876int qeth_osn_assist(struct net_device *, void *, int); 894int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3a5a18a0fc28..dd08f7b42fb8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -24,6 +24,7 @@
24 24
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/sysinfo.h>
27 28
28#include "qeth_core.h" 29#include "qeth_core.h"
29 30
@@ -302,12 +303,15 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
302 int com = cmd->hdr.command; 303 int com = cmd->hdr.command;
303 ipa_name = qeth_get_ipa_cmd_name(com); 304 ipa_name = qeth_get_ipa_cmd_name(com);
304 if (rc) 305 if (rc)
305 QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n", 306 QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
306 ipa_name, com, QETH_CARD_IFNAME(card), 307 "x%X \"%s\"\n",
307 rc, qeth_get_ipa_msg(rc)); 308 ipa_name, com, dev_name(&card->gdev->dev),
309 QETH_CARD_IFNAME(card), rc,
310 qeth_get_ipa_msg(rc));
308 else 311 else
309 QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n", 312 QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
310 ipa_name, com, QETH_CARD_IFNAME(card)); 313 ipa_name, com, dev_name(&card->gdev->dev),
314 QETH_CARD_IFNAME(card));
311} 315}
312 316
313static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, 317static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -346,6 +350,8 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
346 card->info.chpid); 350 card->info.chpid);
347 netif_carrier_on(card->dev); 351 netif_carrier_on(card->dev);
348 card->lan_online = 1; 352 card->lan_online = 1;
353 if (card->info.hwtrap)
354 card->info.hwtrap = 2;
349 qeth_schedule_recovery(card); 355 qeth_schedule_recovery(card);
350 return NULL; 356 return NULL;
351 case IPA_CMD_MODCCID: 357 case IPA_CMD_MODCCID:
@@ -877,23 +883,21 @@ out:
877 return; 883 return;
878} 884}
879 885
880static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 886static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
881 struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb) 887 struct qeth_qdio_out_buffer *buf)
882{ 888{
883 int i; 889 int i;
884 struct sk_buff *skb; 890 struct sk_buff *skb;
885 891
886 /* is PCI flag set on buffer? */ 892 /* is PCI flag set on buffer? */
887 if (buf->buffer->element[0].flags & 0x40) 893 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
888 atomic_dec(&queue->set_pci_flags_count); 894 atomic_dec(&queue->set_pci_flags_count);
889 895
890 if (!qeth_skip_skb) { 896 skb = skb_dequeue(&buf->skb_list);
897 while (skb) {
898 atomic_dec(&skb->users);
899 dev_kfree_skb_any(skb);
891 skb = skb_dequeue(&buf->skb_list); 900 skb = skb_dequeue(&buf->skb_list);
892 while (skb) {
893 atomic_dec(&skb->users);
894 dev_kfree_skb_any(skb);
895 skb = skb_dequeue(&buf->skb_list);
896 }
897 } 901 }
898 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { 902 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
899 if (buf->buffer->element[i].addr && buf->is_header[i]) 903 if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -902,19 +906,15 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
902 buf->is_header[i] = 0; 906 buf->is_header[i] = 0;
903 buf->buffer->element[i].length = 0; 907 buf->buffer->element[i].length = 0;
904 buf->buffer->element[i].addr = NULL; 908 buf->buffer->element[i].addr = NULL;
905 buf->buffer->element[i].flags = 0; 909 buf->buffer->element[i].eflags = 0;
910 buf->buffer->element[i].sflags = 0;
906 } 911 }
907 buf->buffer->element[15].flags = 0; 912 buf->buffer->element[15].eflags = 0;
913 buf->buffer->element[15].sflags = 0;
908 buf->next_element_to_fill = 0; 914 buf->next_element_to_fill = 0;
909 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 915 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
910} 916}
911 917
912static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
913 struct qeth_qdio_out_buffer *buf)
914{
915 __qeth_clear_output_buffer(queue, buf, 0);
916}
917
918void qeth_clear_qdio_buffers(struct qeth_card *card) 918void qeth_clear_qdio_buffers(struct qeth_card *card)
919{ 919{
920 int i, j; 920 int i, j;
@@ -996,16 +996,30 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
996 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); 996 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
997 if (chp_dsc != NULL) { 997 if (chp_dsc != NULL) {
998 /* CHPP field bit 6 == 1 -> single queue */ 998 /* CHPP field bit 6 == 1 -> single queue */
999 if ((chp_dsc->chpp & 0x02) == 0x02) 999 if ((chp_dsc->chpp & 0x02) == 0x02) {
1000 if ((atomic_read(&card->qdio.state) !=
1001 QETH_QDIO_UNINITIALIZED) &&
1002 (card->qdio.no_out_queues == 4))
1003 /* change from 4 to 1 outbound queues */
1004 qeth_free_qdio_buffers(card);
1000 card->qdio.no_out_queues = 1; 1005 card->qdio.no_out_queues = 1;
1006 if (card->qdio.default_out_queue != 0)
1007 dev_info(&card->gdev->dev,
1008 "Priority Queueing not supported\n");
1009 card->qdio.default_out_queue = 0;
1010 } else {
1011 if ((atomic_read(&card->qdio.state) !=
1012 QETH_QDIO_UNINITIALIZED) &&
1013 (card->qdio.no_out_queues == 1)) {
1014 /* change from 1 to 4 outbound queues */
1015 qeth_free_qdio_buffers(card);
1016 card->qdio.default_out_queue = 2;
1017 }
1018 card->qdio.no_out_queues = 4;
1019 }
1001 card->info.func_level = 0x4100 + chp_dsc->desc; 1020 card->info.func_level = 0x4100 + chp_dsc->desc;
1002 kfree(chp_dsc); 1021 kfree(chp_dsc);
1003 } 1022 }
1004 if (card->qdio.no_out_queues == 1) {
1005 card->qdio.default_out_queue = 0;
1006 dev_info(&card->gdev->dev,
1007 "Priority Queueing not supported\n");
1008 }
1009 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); 1023 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1010 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); 1024 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1011 return; 1025 return;
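The reworked branch handles the queue geometry changing across a recovery: if the CHPID flips between single-queue and four-queue operation while qdio buffers from the old layout still exist, they are freed so the next allocation matches the new queue count, and the default outbound queue is moved to the only legal value (0) in single-queue mode. Condensed to its decision, ignoring that the real code preserves a user-configured default queue in the four-queue case:

        int single = (chp_dsc->chpp & 0x02) == 0x02;    /* CHPP bit 6 */
        int wanted = single ? 1 : 4;

        if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED &&
            card->qdio.no_out_queues != wanted)
                qeth_free_qdio_buffers(card);   /* re-size for the new geometry */
        card->qdio.no_out_queues = wanted;
        card->qdio.default_out_queue = single ? 0 : 2;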
@@ -1017,7 +1031,10 @@ static void qeth_init_qdio_info(struct qeth_card *card)
1017 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 1031 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1018 /* inbound */ 1032 /* inbound */
1019 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; 1033 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1020 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; 1034 if (card->info.type == QETH_CARD_TYPE_IQD)
1035 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1036 else
1037 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1021 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; 1038 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1022 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); 1039 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1023 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); 1040 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
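With QETH_IN_BUF_SIZE_DEFAULT at 64 KiB this sizing is easy to put in absolute terms: HiperSockets (IQD) devices now start with the former maximum of 128 buffers, and everything else moves from 16 to 64. Back-of-envelope:

        /* 64 KiB per inbound buffer */
        unsigned long iqd_pool = 128UL << 16;   /* HSDEFAULT: 8 MiB */
        unsigned long osa_pool =  64UL << 16;   /* DEFAULT:   4 MiB */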
@@ -1027,7 +1044,6 @@ static void qeth_set_intial_options(struct qeth_card *card)
1027{ 1044{
1028 card->options.route4.type = NO_ROUTER; 1045 card->options.route4.type = NO_ROUTER;
1029 card->options.route6.type = NO_ROUTER; 1046 card->options.route6.type = NO_ROUTER;
1030 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1031 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; 1047 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1032 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL; 1048 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1033 card->options.fake_broadcast = 0; 1049 card->options.fake_broadcast = 0;
@@ -1077,7 +1093,6 @@ static int qeth_setup_card(struct qeth_card *card)
1077 card->data.state = CH_STATE_DOWN; 1093 card->data.state = CH_STATE_DOWN;
1078 card->state = CARD_STATE_DOWN; 1094 card->state = CARD_STATE_DOWN;
1079 card->lan_online = 0; 1095 card->lan_online = 0;
1080 card->use_hard_stop = 0;
1081 card->read_or_write_problem = 0; 1096 card->read_or_write_problem = 0;
1082 card->dev = NULL; 1097 card->dev = NULL;
1083 spin_lock_init(&card->vlanlock); 1098 spin_lock_init(&card->vlanlock);
@@ -1096,7 +1111,7 @@ static int qeth_setup_card(struct qeth_card *card)
1096 INIT_LIST_HEAD(card->ip_tbd_list); 1111 INIT_LIST_HEAD(card->ip_tbd_list);
1097 INIT_LIST_HEAD(&card->cmd_waiter_list); 1112 INIT_LIST_HEAD(&card->cmd_waiter_list);
1098 init_waitqueue_head(&card->wait_q); 1113 init_waitqueue_head(&card->wait_q);
1099 /* intial options */ 1114 /* initial options */
1100 qeth_set_intial_options(card); 1115 qeth_set_intial_options(card);
1101 /* IP address takeover */ 1116 /* IP address takeover */
1102 INIT_LIST_HEAD(&card->ipato.entries); 1117 INIT_LIST_HEAD(&card->ipato.entries);
@@ -1726,20 +1741,22 @@ int qeth_send_control_data(struct qeth_card *card, int len,
1726 }; 1741 };
1727 } 1742 }
1728 1743
1744 if (reply->rc == -EIO)
1745 goto error;
1729 rc = reply->rc; 1746 rc = reply->rc;
1730 qeth_put_reply(reply); 1747 qeth_put_reply(reply);
1731 return rc; 1748 return rc;
1732 1749
1733time_err: 1750time_err:
1751 reply->rc = -ETIME;
1734 spin_lock_irqsave(&reply->card->lock, flags); 1752 spin_lock_irqsave(&reply->card->lock, flags);
1735 list_del_init(&reply->list); 1753 list_del_init(&reply->list);
1736 spin_unlock_irqrestore(&reply->card->lock, flags); 1754 spin_unlock_irqrestore(&reply->card->lock, flags);
1737 reply->rc = -ETIME;
1738 atomic_inc(&reply->received); 1755 atomic_inc(&reply->received);
1756error:
1739 atomic_set(&card->write.irq_pending, 0); 1757 atomic_set(&card->write.irq_pending, 0);
1740 qeth_release_buffer(iob->channel, iob); 1758 qeth_release_buffer(iob->channel, iob);
1741 card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; 1759 card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
1742 wake_up(&reply->wait_q);
1743 rc = reply->rc; 1760 rc = reply->rc;
1744 qeth_put_reply(reply); 1761 qeth_put_reply(reply);
1745 return rc; 1762 return rc;
@@ -1840,33 +1857,6 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
1840 } 1857 }
1841} 1858}
1842 1859
1843static inline int qeth_get_max_mtu_for_card(int cardtype)
1844{
1845 switch (cardtype) {
1846
1847 case QETH_CARD_TYPE_UNKNOWN:
1848 case QETH_CARD_TYPE_OSD:
1849 case QETH_CARD_TYPE_OSN:
1850 case QETH_CARD_TYPE_OSM:
1851 case QETH_CARD_TYPE_OSX:
1852 return 61440;
1853 case QETH_CARD_TYPE_IQD:
1854 return 57344;
1855 default:
1856 return 1500;
1857 }
1858}
1859
1860static inline int qeth_get_mtu_out_of_mpc(int cardtype)
1861{
1862 switch (cardtype) {
1863 case QETH_CARD_TYPE_IQD:
1864 return 1;
1865 default:
1866 return 0;
1867 }
1868}
1869
1870static inline int qeth_get_mtu_outof_framesize(int framesize) 1860static inline int qeth_get_mtu_outof_framesize(int framesize)
1871{ 1861{
1872 switch (framesize) { 1862 switch (framesize) {
@@ -1889,10 +1879,9 @@ static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
1889 case QETH_CARD_TYPE_OSD: 1879 case QETH_CARD_TYPE_OSD:
1890 case QETH_CARD_TYPE_OSM: 1880 case QETH_CARD_TYPE_OSM:
1891 case QETH_CARD_TYPE_OSX: 1881 case QETH_CARD_TYPE_OSX:
1892 return ((mtu >= 576) && (mtu <= 61440));
1893 case QETH_CARD_TYPE_IQD: 1882 case QETH_CARD_TYPE_IQD:
1894 return ((mtu >= 576) && 1883 return ((mtu >= 576) &&
1895 (mtu <= card->info.max_mtu + 4096 - 32)); 1884 (mtu <= card->info.max_mtu));
1896 case QETH_CARD_TYPE_OSN: 1885 case QETH_CARD_TYPE_OSN:
1897 case QETH_CARD_TYPE_UNKNOWN: 1886 case QETH_CARD_TYPE_UNKNOWN:
1898 default: 1887 default:
@@ -1915,7 +1904,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1915 memcpy(&card->token.ulp_filter_r, 1904 memcpy(&card->token.ulp_filter_r,
1916 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 1905 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1917 QETH_MPC_TOKEN_LENGTH); 1906 QETH_MPC_TOKEN_LENGTH);
1918 if (qeth_get_mtu_out_of_mpc(card->info.type)) { 1907 if (card->info.type == QETH_CARD_TYPE_IQD) {
1919 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 1908 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1920 mtu = qeth_get_mtu_outof_framesize(framesize); 1909 mtu = qeth_get_mtu_outof_framesize(framesize);
1921 if (!mtu) { 1910 if (!mtu) {
@@ -1923,12 +1912,21 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1923 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); 1912 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1924 return 0; 1913 return 0;
1925 } 1914 }
1926 card->info.max_mtu = mtu; 1915 if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
1916 /* frame size has changed */
1917 if (card->dev &&
1918 ((card->dev->mtu == card->info.initial_mtu) ||
1919 (card->dev->mtu > mtu)))
1920 card->dev->mtu = mtu;
1921 qeth_free_qdio_buffers(card);
1922 }
1927 card->info.initial_mtu = mtu; 1923 card->info.initial_mtu = mtu;
1924 card->info.max_mtu = mtu;
1928 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE; 1925 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1929 } else { 1926 } else {
1930 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card); 1927 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1931 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type); 1928 card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
1929 iob->data);
1932 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; 1930 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1933 } 1931 }
1934 1932
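For IQD the maximum MTU now always equals the value derived from the ULP-reported frame size, and a frame size that changed across a recovery shrinks dev->mtu where needed and forces the qdio buffers to be rebuilt. For orientation, qeth_get_mtu_outof_framesize() (untouched by this patch, quoted here from the mainline driver of this period, so treat the values as indicative) maps:

static inline int qeth_get_mtu_outof_framesize(int framesize)
{
        switch (framesize) {
        case 0x4000:
                return 8192;
        case 0x6000:
                return 16384;
        case 0xa000:
                return 32768;
        case 0xffff:
                return 57344;
        default:
                return 0;
        }
}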
@@ -2372,9 +2370,10 @@ static int qeth_init_input_buffer(struct qeth_card *card,
2372 buf->buffer->element[i].length = PAGE_SIZE; 2370 buf->buffer->element[i].length = PAGE_SIZE;
2373 buf->buffer->element[i].addr = pool_entry->elements[i]; 2371 buf->buffer->element[i].addr = pool_entry->elements[i];
2374 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2372 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2375 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY; 2373 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2376 else 2374 else
2377 buf->buffer->element[i].flags = 0; 2375 buf->buffer->element[i].eflags = 0;
2376 buf->buffer->element[i].sflags = 0;
2378 } 2377 }
2379 return 0; 2378 return 0;
2380} 2379}
@@ -2503,45 +2502,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2503} 2502}
2504EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 2503EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2505 2504
2506static int qeth_send_startstoplan(struct qeth_card *card,
2507 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2508{
2509 int rc;
2510 struct qeth_cmd_buffer *iob;
2511
2512 iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
2513 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2514
2515 return rc;
2516}
2517
2518int qeth_send_startlan(struct qeth_card *card) 2505int qeth_send_startlan(struct qeth_card *card)
2519{ 2506{
2520 int rc; 2507 int rc;
2508 struct qeth_cmd_buffer *iob;
2521 2509
2522 QETH_DBF_TEXT(SETUP, 2, "strtlan"); 2510 QETH_DBF_TEXT(SETUP, 2, "strtlan");
2523 2511
2524 rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0); 2512 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
2513 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
2525 return rc; 2514 return rc;
2526} 2515}
2527EXPORT_SYMBOL_GPL(qeth_send_startlan); 2516EXPORT_SYMBOL_GPL(qeth_send_startlan);
2528 2517
2529int qeth_send_stoplan(struct qeth_card *card)
2530{
2531 int rc = 0;
2532
2533 /*
2534 * TODO: according to the IPA format document page 14,
2535 * TCP/IP (we!) never issue a STOPLAN
2536 * is this right ?!?
2537 */
2538 QETH_DBF_TEXT(SETUP, 2, "stoplan");
2539
2540 rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0);
2541 return rc;
2542}
2543EXPORT_SYMBOL_GPL(qeth_send_stoplan);
2544
2545int qeth_default_setadapterparms_cb(struct qeth_card *card, 2518int qeth_default_setadapterparms_cb(struct qeth_card *card,
2546 struct qeth_reply *reply, unsigned long data) 2519 struct qeth_reply *reply, unsigned long data)
2547{ 2520{
@@ -2606,17 +2579,153 @@ int qeth_query_setadapterparms(struct qeth_card *card)
2606} 2579}
2607EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); 2580EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2608 2581
2582static int qeth_query_ipassists_cb(struct qeth_card *card,
2583 struct qeth_reply *reply, unsigned long data)
2584{
2585 struct qeth_ipa_cmd *cmd;
2586
2587 QETH_DBF_TEXT(SETUP, 2, "qipasscb");
2588
2589 cmd = (struct qeth_ipa_cmd *) data;
2590 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2591 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2592 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2593 } else {
2594 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
2595 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
2596 }
2597 QETH_DBF_TEXT(SETUP, 2, "suppenbl");
2598 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
2599 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
2600 return 0;
2601}
2602
2603int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
2604{
2605 int rc;
2606 struct qeth_cmd_buffer *iob;
2607
2608 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
2609 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
2610 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
2611 return rc;
2612}
2613EXPORT_SYMBOL_GPL(qeth_query_ipassists);
2614
2615static int qeth_query_setdiagass_cb(struct qeth_card *card,
2616 struct qeth_reply *reply, unsigned long data)
2617{
2618 struct qeth_ipa_cmd *cmd;
2619 __u16 rc;
2620
2621 cmd = (struct qeth_ipa_cmd *)data;
2622 rc = cmd->hdr.return_code;
2623 if (rc)
2624 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
2625 else
2626 card->info.diagass_support = cmd->data.diagass.ext;
2627 return 0;
2628}
2629
2630static int qeth_query_setdiagass(struct qeth_card *card)
2631{
2632 struct qeth_cmd_buffer *iob;
2633 struct qeth_ipa_cmd *cmd;
2634
2635 QETH_DBF_TEXT(SETUP, 2, "qdiagass");
2636 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
2637 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2638 cmd->data.diagass.subcmd_len = 16;
2639 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
2640 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
2641}
2642
2643static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
2644{
2645 unsigned long info = get_zeroed_page(GFP_KERNEL);
2646 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
2647 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
2648 struct ccw_dev_id ccwid;
2649 int level, rc;
2650
2651 tid->chpid = card->info.chpid;
2652 ccw_device_get_id(CARD_RDEV(card), &ccwid);
2653 tid->ssid = ccwid.ssid;
2654 tid->devno = ccwid.devno;
2655 if (!info)
2656 return;
2657
2658 rc = stsi(NULL, 0, 0, 0);
2659 if (rc == -ENOSYS)
2660 level = rc;
2661 else
2662 level = (((unsigned int) rc) >> 28);
2663
2664 if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS))
2665 tid->lparnr = info222->lpar_number;
2666
2667 if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) {
2668 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
2669 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
2670 }
2671 free_page(info);
2672 return;
2673}
2674
2675static int qeth_hw_trap_cb(struct qeth_card *card,
2676 struct qeth_reply *reply, unsigned long data)
2677{
2678 struct qeth_ipa_cmd *cmd;
2679 __u16 rc;
2680
2681 cmd = (struct qeth_ipa_cmd *)data;
2682 rc = cmd->hdr.return_code;
2683 if (rc)
2684 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
2685 return 0;
2686}
2687
2688int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
2689{
2690 struct qeth_cmd_buffer *iob;
2691 struct qeth_ipa_cmd *cmd;
2692
2693 QETH_DBF_TEXT(SETUP, 2, "diagtrap");
2694 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
2695 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2696 cmd->data.diagass.subcmd_len = 80;
2697 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
2698 cmd->data.diagass.type = 1;
2699 cmd->data.diagass.action = action;
2700 switch (action) {
2701 case QETH_DIAGS_TRAP_ARM:
2702 cmd->data.diagass.options = 0x0003;
2703 cmd->data.diagass.ext = 0x00010000 +
2704 sizeof(struct qeth_trap_id);
2705 qeth_get_trap_id(card,
2706 (struct qeth_trap_id *)cmd->data.diagass.cdata);
2707 break;
2708 case QETH_DIAGS_TRAP_DISARM:
2709 cmd->data.diagass.options = 0x0001;
2710 break;
2711 case QETH_DIAGS_TRAP_CAPTURE:
2712 break;
2713 }
2714 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
2715}
2716EXPORT_SYMBOL_GPL(qeth_hw_trap);
2717
2609int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, 2718int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2610 unsigned int qdio_error, const char *dbftext) 2719 unsigned int qdio_error, const char *dbftext)
2611{ 2720{
2612 if (qdio_error) { 2721 if (qdio_error) {
2613 QETH_CARD_TEXT(card, 2, dbftext); 2722 QETH_CARD_TEXT(card, 2, dbftext);
2614 QETH_CARD_TEXT_(card, 2, " F15=%02X", 2723 QETH_CARD_TEXT_(card, 2, " F15=%02X",
2615 buf->element[15].flags & 0xff); 2724 buf->element[15].sflags);
2616 QETH_CARD_TEXT_(card, 2, " F14=%02X", 2725 QETH_CARD_TEXT_(card, 2, " F14=%02X",
2617 buf->element[14].flags & 0xff); 2726 buf->element[14].sflags);
2618 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 2727 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
2619 if ((buf->element[15].flags & 0xff) == 0x12) { 2728 if ((buf->element[15].sflags) == 0x12) {
2620 card->stats.rx_dropped++; 2729 card->stats.rx_dropped++;
2621 return 0; 2730 return 0;
2622 } else 2731 } else
@@ -2692,7 +2801,7 @@ EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2692static int qeth_handle_send_error(struct qeth_card *card, 2801static int qeth_handle_send_error(struct qeth_card *card,
2693 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 2802 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2694{ 2803{
2695 int sbalf15 = buffer->buffer->element[15].flags & 0xff; 2804 int sbalf15 = buffer->buffer->element[15].sflags;
2696 2805
2697 QETH_CARD_TEXT(card, 6, "hdsnderr"); 2806 QETH_CARD_TEXT(card, 6, "hdsnderr");
2698 if (card->info.type == QETH_CARD_TYPE_IQD) { 2807 if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2801,8 +2910,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2801 2910
2802 for (i = index; i < index + count; ++i) { 2911 for (i = index; i < index + count; ++i) {
2803 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2912 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2804 buf->buffer->element[buf->next_element_to_fill - 1].flags |= 2913 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
2805 SBAL_FLAGS_LAST_ENTRY; 2914 SBAL_EFLAGS_LAST_ENTRY;
2806 2915
2807 if (queue->card->info.type == QETH_CARD_TYPE_IQD) 2916 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2808 continue; 2917 continue;
@@ -2815,7 +2924,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2815 /* it's likely that we'll go to packing 2924 /* it's likely that we'll go to packing
2816 * mode soon */ 2925 * mode soon */
2817 atomic_inc(&queue->set_pci_flags_count); 2926 atomic_inc(&queue->set_pci_flags_count);
2818 buf->buffer->element[0].flags |= 0x40; 2927 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
2819 } 2928 }
2820 } else { 2929 } else {
2821 if (!atomic_read(&queue->set_pci_flags_count)) { 2930 if (!atomic_read(&queue->set_pci_flags_count)) {
@@ -2828,12 +2937,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2828 * further send was requested by the stack 2937 * further send was requested by the stack
2829 */ 2938 */
2830 atomic_inc(&queue->set_pci_flags_count); 2939 atomic_inc(&queue->set_pci_flags_count);
2831 buf->buffer->element[0].flags |= 0x40; 2940 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
2832 } 2941 }
2833 } 2942 }
2834 } 2943 }
2835 2944
2836 queue->sync_iqdio_error = 0;
2837 queue->card->dev->trans_start = jiffies; 2945 queue->card->dev->trans_start = jiffies;
2838 if (queue->card->options.performance_stats) { 2946 if (queue->card->options.performance_stats) {
2839 queue->card->perf_stats.outbound_do_qdio_cnt++; 2947 queue->card->perf_stats.outbound_do_qdio_cnt++;
@@ -2849,10 +2957,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2849 queue->card->perf_stats.outbound_do_qdio_time += 2957 queue->card->perf_stats.outbound_do_qdio_time +=
2850 qeth_get_micros() - 2958 qeth_get_micros() -
2851 queue->card->perf_stats.outbound_do_qdio_start_time; 2959 queue->card->perf_stats.outbound_do_qdio_start_time;
2852 if (rc > 0) { 2960 atomic_add(count, &queue->used_buffers);
2853 if (!(rc & QDIO_ERROR_SIGA_BUSY))
2854 queue->sync_iqdio_error = rc & 3;
2855 }
2856 if (rc) { 2961 if (rc) {
2857 queue->card->stats.tx_errors += count; 2962 queue->card->stats.tx_errors += count;
2858 /* ignore temporary SIGA errors without busy condition */ 2963 /* ignore temporary SIGA errors without busy condition */
@@ -2866,7 +2971,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2866 qeth_schedule_recovery(queue->card); 2971 qeth_schedule_recovery(queue->card);
2867 return; 2972 return;
2868 } 2973 }
2869 atomic_add(count, &queue->used_buffers);
2870 if (queue->card->options.performance_stats) 2974 if (queue->card->options.performance_stats)
2871 queue->card->perf_stats.bufs_sent += count; 2975 queue->card->perf_stats.bufs_sent += count;
2872} 2976}
@@ -2911,6 +3015,27 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2911 } 3015 }
2912} 3016}
2913 3017
3018void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3019 unsigned long card_ptr)
3020{
3021 struct qeth_card *card = (struct qeth_card *)card_ptr;
3022
3023 if (card->dev && (card->dev->flags & IFF_UP))
3024 napi_schedule(&card->napi);
3025}
3026EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
3027
3028void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
3029 unsigned int queue, int first_element, int count,
3030 unsigned long card_ptr)
3031{
3032 struct qeth_card *card = (struct qeth_card *)card_ptr;
3033
3034 if (qdio_err)
3035 qeth_schedule_recovery(card);
3036}
3037EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
3038
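qeth_qdio_start_poll() is the hook qdio invokes, with the inbound queue interrupt already switched off, as soon as data arrives; the input handler proper shrinks to an error hook because buffer processing now happens in NAPI context. The resulting handshake with the qdio polling API, sketched with illustrative variable names:

        /* 1. qdio sees inbound data, disables the interrupt and calls
         *    ->queue_start_poll, which schedules NAPI */
        napi_schedule(&card->napi);

        /* 2. the poll routine drains buffers with interrupts off */
        n = qdio_get_next_buffers(cdev, 0, &index, &err);

        /* 3. queue empty: complete NAPI and re-enable the interrupt;
         *    a nonzero return means new data raced in, so poll again */
        napi_complete(napi);
        if (qdio_start_irq(cdev, 0))
                napi_schedule(napi);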
2914void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3039void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2915 unsigned int qdio_error, int __queue, int first_element, 3040 unsigned int qdio_error, int __queue, int first_element,
2916 int count, unsigned long card_ptr) 3041 int count, unsigned long card_ptr)
@@ -2919,7 +3044,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2919 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 3044 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
2920 struct qeth_qdio_out_buffer *buffer; 3045 struct qeth_qdio_out_buffer *buffer;
2921 int i; 3046 int i;
2922 unsigned qeth_send_err;
2923 3047
2924 QETH_CARD_TEXT(card, 6, "qdouhdl"); 3048 QETH_CARD_TEXT(card, 6, "qdouhdl");
2925 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { 3049 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
@@ -2935,9 +3059,8 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2935 } 3059 }
2936 for (i = first_element; i < (first_element + count); ++i) { 3060 for (i = first_element; i < (first_element + count); ++i) {
2937 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 3061 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2938 qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error); 3062 qeth_handle_send_error(card, buffer, qdio_error);
2939 __qeth_clear_output_buffer(queue, buffer, 3063 qeth_clear_output_buffer(queue, buffer);
2940 (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
2941 } 3064 }
2942 atomic_sub(count, &queue->used_buffers); 3065 atomic_sub(count, &queue->used_buffers);
2943 /* check if we need to do something on this outbound queue */ 3066 /* check if we need to do something on this outbound queue */
@@ -3060,20 +3183,20 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3060 if (!length) { 3183 if (!length) {
3061 if (first_lap) 3184 if (first_lap)
3062 if (skb_shinfo(skb)->nr_frags) 3185 if (skb_shinfo(skb)->nr_frags)
3063 buffer->element[element].flags = 3186 buffer->element[element].eflags =
3064 SBAL_FLAGS_FIRST_FRAG; 3187 SBAL_EFLAGS_FIRST_FRAG;
3065 else 3188 else
3066 buffer->element[element].flags = 0; 3189 buffer->element[element].eflags = 0;
3067 else 3190 else
3068 buffer->element[element].flags = 3191 buffer->element[element].eflags =
3069 SBAL_FLAGS_MIDDLE_FRAG; 3192 SBAL_EFLAGS_MIDDLE_FRAG;
3070 } else { 3193 } else {
3071 if (first_lap) 3194 if (first_lap)
3072 buffer->element[element].flags = 3195 buffer->element[element].eflags =
3073 SBAL_FLAGS_FIRST_FRAG; 3196 SBAL_EFLAGS_FIRST_FRAG;
3074 else 3197 else
3075 buffer->element[element].flags = 3198 buffer->element[element].eflags =
3076 SBAL_FLAGS_MIDDLE_FRAG; 3199 SBAL_EFLAGS_MIDDLE_FRAG;
3077 } 3200 }
3078 data += length_here; 3201 data += length_here;
3079 element++; 3202 element++;
@@ -3085,12 +3208,12 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3085 buffer->element[element].addr = (char *)page_to_phys(frag->page) 3208 buffer->element[element].addr = (char *)page_to_phys(frag->page)
3086 + frag->page_offset; 3209 + frag->page_offset;
3087 buffer->element[element].length = frag->size; 3210 buffer->element[element].length = frag->size;
3088 buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG; 3211 buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
3089 element++; 3212 element++;
3090 } 3213 }
3091 3214
3092 if (buffer->element[element - 1].flags) 3215 if (buffer->element[element - 1].eflags)
3093 buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG; 3216 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3094 *next_element_to_fill = element; 3217 *next_element_to_fill = element;
3095} 3218}
3096 3219
@@ -3114,7 +3237,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3114 /*fill first buffer entry only with header information */ 3237 /*fill first buffer entry only with header information */
3115 buffer->element[element].addr = skb->data; 3238 buffer->element[element].addr = skb->data;
3116 buffer->element[element].length = hdr_len; 3239 buffer->element[element].length = hdr_len;
3117 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; 3240 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3118 buf->next_element_to_fill++; 3241 buf->next_element_to_fill++;
3119 skb->data += hdr_len; 3242 skb->data += hdr_len;
3120 skb->len -= hdr_len; 3243 skb->len -= hdr_len;
@@ -3126,7 +3249,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3126 buffer->element[element].addr = hdr; 3249 buffer->element[element].addr = hdr;
3127 buffer->element[element].length = sizeof(struct qeth_hdr) + 3250 buffer->element[element].length = sizeof(struct qeth_hdr) +
3128 hd_len; 3251 hd_len;
3129 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; 3252 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3130 buf->is_header[element] = 1; 3253 buf->is_header[element] = 1;
3131 buf->next_element_to_fill++; 3254 buf->next_element_to_fill++;
3132 } 3255 }
@@ -3162,10 +3285,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3162 int offset, int hd_len) 3285 int offset, int hd_len)
3163{ 3286{
3164 struct qeth_qdio_out_buffer *buffer; 3287 struct qeth_qdio_out_buffer *buffer;
3165 struct sk_buff *skb1;
3166 struct qeth_skb_data *retry_ctrl;
3167 int index; 3288 int index;
3168 int rc;
3169 3289
3170 /* spin until we get the queue ... */ 3290 /* spin until we get the queue ... */
3171 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3291 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
@@ -3184,25 +3304,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3184 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3304 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3185 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); 3305 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
3186 qeth_flush_buffers(queue, index, 1); 3306 qeth_flush_buffers(queue, index, 1);
3187 if (queue->sync_iqdio_error == 2) {
3188 skb1 = skb_dequeue(&buffer->skb_list);
3189 while (skb1) {
3190 atomic_dec(&skb1->users);
3191 skb1 = skb_dequeue(&buffer->skb_list);
3192 }
3193 retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
3194 if (retry_ctrl->magic != QETH_SKB_MAGIC) {
3195 retry_ctrl->magic = QETH_SKB_MAGIC;
3196 retry_ctrl->count = 0;
3197 }
3198 if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
3199 retry_ctrl->count++;
3200 rc = dev_queue_xmit(skb);
3201 } else {
3202 dev_kfree_skb_any(skb);
3203 QETH_CARD_TEXT(card, 2, "qrdrop");
3204 }
3205 }
3206 return 0; 3307 return 0;
3207out: 3308out:
3208 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3309 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3791,6 +3892,47 @@ static inline int qeth_get_qdio_q_format(struct qeth_card *card)
3791 } 3892 }
3792} 3893}
3793 3894
3895static void qeth_determine_capabilities(struct qeth_card *card)
3896{
3897 int rc;
3898 int length;
3899 char *prcd;
3900 struct ccw_device *ddev;
3901 int ddev_offline = 0;
3902
3903 QETH_DBF_TEXT(SETUP, 2, "detcapab");
3904 ddev = CARD_DDEV(card);
3905 if (!ddev->online) {
3906 ddev_offline = 1;
3907 rc = ccw_device_set_online(ddev);
3908 if (rc) {
3909 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3910 goto out;
3911 }
3912 }
3913
3914 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
3915 if (rc) {
3916 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
3917 dev_name(&card->gdev->dev), rc);
3918 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3919 goto out_offline;
3920 }
3921 qeth_configure_unitaddr(card, prcd);
3922 qeth_configure_blkt_default(card, prcd);
3923 kfree(prcd);
3924
3925 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
3926 if (rc)
3927 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3928
3929out_offline:
3930 if (ddev_offline == 1)
3931 ccw_device_set_offline(ddev);
3932out:
3933 return;
3934}
3935
3794static int qeth_qdio_establish(struct qeth_card *card) 3936static int qeth_qdio_establish(struct qeth_card *card)
3795{ 3937{
3796 struct qdio_initialize init_data; 3938 struct qdio_initialize init_data;
@@ -3843,9 +3985,12 @@ static int qeth_qdio_establish(struct qeth_card *card)
3843 init_data.no_output_qs = card->qdio.no_out_queues; 3985 init_data.no_output_qs = card->qdio.no_out_queues;
3844 init_data.input_handler = card->discipline.input_handler; 3986 init_data.input_handler = card->discipline.input_handler;
3845 init_data.output_handler = card->discipline.output_handler; 3987 init_data.output_handler = card->discipline.output_handler;
3988 init_data.queue_start_poll = card->discipline.start_poll;
3846 init_data.int_parm = (unsigned long) card; 3989 init_data.int_parm = (unsigned long) card;
3847 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; 3990 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3848 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; 3991 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3992 init_data.scan_threshold =
3993 (card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
3849 3994
3850 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 3995 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3851 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 3996 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -3898,7 +4043,10 @@ static struct ccw_device_id qeth_ids[] = {
3898MODULE_DEVICE_TABLE(ccw, qeth_ids); 4043MODULE_DEVICE_TABLE(ccw, qeth_ids);
3899 4044
3900static struct ccw_driver qeth_ccw_driver = { 4045static struct ccw_driver qeth_ccw_driver = {
3901 .name = "qeth", 4046 .driver = {
4047 .owner = THIS_MODULE,
4048 .name = "qeth",
4049 },
3902 .ids = qeth_ids, 4050 .ids = qeth_ids,
3903 .probe = ccwgroup_probe_ccwdev, 4051 .probe = ccwgroup_probe_ccwdev,
3904 .remove = ccwgroup_remove_ccwdev, 4052 .remove = ccwgroup_remove_ccwdev,
@@ -3918,6 +4066,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
3918 4066
3919 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 4067 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3920 atomic_set(&card->force_alloc_skb, 0); 4068 atomic_set(&card->force_alloc_skb, 0);
4069 qeth_get_channel_path_desc(card);
3921retry: 4070retry:
3922 if (retries) 4071 if (retries)
3923 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 4072 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -3946,6 +4095,7 @@ retriable:
3946 else 4095 else
3947 goto retry; 4096 goto retry;
3948 } 4097 }
4098 qeth_determine_capabilities(card);
3949 qeth_init_tokens(card); 4099 qeth_init_tokens(card);
3950 qeth_init_func_level(card); 4100 qeth_init_func_level(card);
3951 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); 4101 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -3976,6 +4126,15 @@ retriable:
3976 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 4126 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3977 goto out; 4127 goto out;
3978 } 4128 }
4129
4130 card->options.ipa4.supported_funcs = 0;
4131 card->options.adp.supported_funcs = 0;
4132 card->info.diagass_support = 0;
4133 qeth_query_ipassists(card, QETH_PROT_IPV4);
4134 if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
4135 qeth_query_setadapterparms(card);
4136 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
4137 qeth_query_setdiagass(card);
3979 return 0; 4138 return 0;
3980out: 4139out:
3981 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 4140 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -4215,41 +4374,6 @@ void qeth_core_free_discipline(struct qeth_card *card)
4215 card->discipline.ccwgdriver = NULL; 4374 card->discipline.ccwgdriver = NULL;
4216} 4375}
4217 4376
4218static void qeth_determine_capabilities(struct qeth_card *card)
4219{
4220 int rc;
4221 int length;
4222 char *prcd;
4223
4224 QETH_DBF_TEXT(SETUP, 2, "detcapab");
4225 rc = ccw_device_set_online(CARD_DDEV(card));
4226 if (rc) {
4227 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4228 goto out;
4229 }
4230
4231
4232 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4233 if (rc) {
4234 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
4235 dev_name(&card->gdev->dev), rc);
4236 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4237 goto out_offline;
4238 }
4239 qeth_configure_unitaddr(card, prcd);
4240 qeth_configure_blkt_default(card, prcd);
4241 kfree(prcd);
4242
4243 rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
4244 if (rc)
4245 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4246
4247out_offline:
4248 ccw_device_set_offline(CARD_DDEV(card));
4249out:
4250 return;
4251}
4252
4253static int qeth_core_probe_device(struct ccwgroup_device *gdev) 4377static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4254{ 4378{
4255 struct qeth_card *card; 4379 struct qeth_card *card;
@@ -4457,8 +4581,10 @@ static int qeth_core_restore(struct ccwgroup_device *gdev)
4457} 4581}
4458 4582
4459static struct ccwgroup_driver qeth_core_ccwgroup_driver = { 4583static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
4460 .owner = THIS_MODULE, 4584 .driver = {
4461 .name = "qeth", 4585 .owner = THIS_MODULE,
4586 .name = "qeth",
4587 },
4462 .driver_id = 0xD8C5E3C8, 4588 .driver_id = 0xD8C5E3C8,
4463 .probe = qeth_core_probe_device, 4589 .probe = qeth_core_probe_device,
4464 .remove = qeth_core_remove_device, 4590 .remove = qeth_core_remove_device,
@@ -4513,8 +4639,8 @@ static struct {
4513/* 20 */{"queue 1 buffer usage"}, 4639/* 20 */{"queue 1 buffer usage"},
4514 {"queue 2 buffer usage"}, 4640 {"queue 2 buffer usage"},
4515 {"queue 3 buffer usage"}, 4641 {"queue 3 buffer usage"},
4516 {"rx handler time"}, 4642 {"rx poll time"},
4517 {"rx handler count"}, 4643 {"rx poll count"},
4518 {"rx do_QDIO time"}, 4644 {"rx do_QDIO time"},
4519 {"rx do_QDIO count"}, 4645 {"rx do_QDIO count"},
4520 {"tx handler time"}, 4646 {"tx handler time"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e37dd8c4bf4e..e5a9d1c03839 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -80,14 +80,6 @@ enum qeth_tr_broadcast_modes {
80 QETH_TR_BROADCAST_LOCAL = 1, 80 QETH_TR_BROADCAST_LOCAL = 1,
81}; 81};
82 82
83/* these values match CHECKSUM_* in include/linux/skbuff.h */
84enum qeth_checksum_types {
85 SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */
86 HW_CHECKSUMMING = 1,
87 NO_CHECKSUMMING = 2,
88};
89#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING
90
91/* 83/*
92 * Routing stuff 84 * Routing stuff
93 */ 85 */
@@ -333,7 +325,7 @@ struct qeth_arp_query_data {
333 __u16 request_bits; 325 __u16 request_bits;
334 __u16 reply_bits; 326 __u16 reply_bits;
335 __u32 no_entries; 327 __u32 no_entries;
336 char data; 328 char data; /* only for replies */
337} __attribute__((packed)); 329} __attribute__((packed));
338 330
339/* used as parameter for arp_query reply */ 331/* used as parameter for arp_query reply */
@@ -456,6 +448,12 @@ enum qeth_diags_trace_cmds {
456 QETH_DIAGS_CMD_TRACE_QUERY = 0x0010, 448 QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
457}; 449};
458 450
451enum qeth_diags_trap_action {
452 QETH_DIAGS_TRAP_ARM = 0x01,
453 QETH_DIAGS_TRAP_DISARM = 0x02,
454 QETH_DIAGS_TRAP_CAPTURE = 0x04,
455};
456
459struct qeth_ipacmd_diagass { 457struct qeth_ipacmd_diagass {
460 __u32 host_tod2; 458 __u32 host_tod2;
461 __u32:32; 459 __u32:32;
@@ -465,7 +463,8 @@ struct qeth_ipacmd_diagass {
465 __u8 type; 463 __u8 type;
466 __u8 action; 464 __u8 action;
467 __u16 options; 465 __u16 options;
468 __u32:32; 466 __u32 ext;
467 __u8 cdata[64];
469} __attribute__ ((packed)); 468} __attribute__ ((packed));
470 469
471/* Header for each IPA command */ 470/* Header for each IPA command */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 42fa783a70c8..0a8e86c1b0ea 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -372,7 +372,7 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
372 i = simple_strtoul(buf, &tmp, 16); 372 i = simple_strtoul(buf, &tmp, 16);
373 if ((i == 0) || (i == 1)) { 373 if ((i == 0) || (i == 1)) {
374 if (i == card->options.performance_stats) 374 if (i == card->options.performance_stats)
375 goto out;; 375 goto out;
376 card->options.performance_stats = i; 376 card->options.performance_stats = i;
377 if (i == 0) 377 if (i == 0)
378 memset(&card->perf_stats, 0, 378 memset(&card->perf_stats, 0,
@@ -530,6 +530,66 @@ out:
530static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show, 530static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
531 qeth_dev_isolation_store); 531 qeth_dev_isolation_store);
532 532
533static ssize_t qeth_hw_trap_show(struct device *dev,
534 struct device_attribute *attr, char *buf)
535{
536 struct qeth_card *card = dev_get_drvdata(dev);
537
538 if (!card)
539 return -EINVAL;
540 if (card->info.hwtrap)
541 return snprintf(buf, 5, "arm\n");
542 else
543 return snprintf(buf, 8, "disarm\n");
544}
545
546static ssize_t qeth_hw_trap_store(struct device *dev,
547 struct device_attribute *attr, const char *buf, size_t count)
548{
549 struct qeth_card *card = dev_get_drvdata(dev);
550 int rc = 0;
551 char *tmp, *curtoken;
552 int state = 0;
553 curtoken = (char *)buf;
554
555 if (!card)
556 return -EINVAL;
557
558 mutex_lock(&card->conf_mutex);
559 if (card->state == CARD_STATE_SOFTSETUP || card->state == CARD_STATE_UP)
560 state = 1;
561 tmp = strsep(&curtoken, "\n");
562
563 if (!strcmp(tmp, "arm") && !card->info.hwtrap) {
564 if (state) {
565 if (qeth_is_diagass_supported(card,
566 QETH_DIAGS_CMD_TRAP)) {
567 rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
568 if (!rc)
569 card->info.hwtrap = 1;
570 } else
571 rc = -EINVAL;
572 } else
573 card->info.hwtrap = 1;
574 } else if (!strcmp(tmp, "disarm") && card->info.hwtrap) {
575 if (state) {
576 rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
577 if (!rc)
578 card->info.hwtrap = 0;
579 } else
580 card->info.hwtrap = 0;
581 } else if (!strcmp(tmp, "trap") && state && card->info.hwtrap)
582 rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
583 else
584 rc = -EINVAL;
585
586 mutex_unlock(&card->conf_mutex);
587 return rc ? rc : count;
588}
589
590static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
591 qeth_hw_trap_store);
592
533static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) 593static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
534{ 594{
535 595
@@ -653,6 +713,7 @@ static struct attribute *qeth_device_attrs[] = {
653 &dev_attr_performance_stats.attr, 713 &dev_attr_performance_stats.attr,
654 &dev_attr_layer2.attr, 714 &dev_attr_layer2.attr,
655 &dev_attr_isolation.attr, 715 &dev_attr_isolation.attr,
716 &dev_attr_hw_trap.attr,
656 NULL, 717 NULL,
657}; 718};
658 719
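The store side accepts "arm", "disarm" and "trap" (capture). Together with the hunk in qeth_check_ipa_data() that bumps hwtrap to 2 when the LAN comes back online, card->info.hwtrap works as a small state machine; the patch uses bare integers, so the names below are illustrative only:

        #define HWTRAP_DISARMED 0       /* default, or "disarm" written */
        #define HWTRAP_ARMED    1       /* "arm" written, trap set up on the card */
        #define HWTRAP_REARM    2       /* link bounced while armed; presumably
                                           re-armed by the recovery path */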
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 830d63524d61..b70b47fbd6cd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -202,17 +202,19 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
202 kfree(mc); 202 kfree(mc);
203} 203}
204 204
205static void qeth_l2_del_all_mc(struct qeth_card *card) 205static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
206{ 206{
207 struct qeth_mc_mac *mc, *tmp; 207 struct qeth_mc_mac *mc, *tmp;
208 208
209 spin_lock_bh(&card->mclock); 209 spin_lock_bh(&card->mclock);
210 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) { 210 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
211 if (mc->is_vmac) 211 if (del) {
212 qeth_l2_send_setdelmac(card, mc->mc_addr, 212 if (mc->is_vmac)
213 qeth_l2_send_setdelmac(card, mc->mc_addr,
213 IPA_CMD_DELVMAC, NULL); 214 IPA_CMD_DELVMAC, NULL);
214 else 215 else
215 qeth_l2_send_delgroupmac(card, mc->mc_addr); 216 qeth_l2_send_delgroupmac(card, mc->mc_addr);
217 }
216 list_del(&mc->list); 218 list_del(&mc->list);
217 kfree(mc); 219 kfree(mc);
218 } 220 }
@@ -288,18 +290,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
288 qeth_l2_send_setdelvlan_cb, NULL); 290 qeth_l2_send_setdelvlan_cb, NULL);
289} 291}
290 292
291static void qeth_l2_process_vlans(struct qeth_card *card, int clear) 293static void qeth_l2_process_vlans(struct qeth_card *card)
292{ 294{
293 struct qeth_vlan_vid *id; 295 struct qeth_vlan_vid *id;
294 QETH_CARD_TEXT(card, 3, "L2prcvln"); 296 QETH_CARD_TEXT(card, 3, "L2prcvln");
295 spin_lock_bh(&card->vlanlock); 297 spin_lock_bh(&card->vlanlock);
296 list_for_each_entry(id, &card->vid_list, list) { 298 list_for_each_entry(id, &card->vid_list, list) {
297 if (clear) 299 qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
298 qeth_l2_send_setdelvlan(card, id->vid,
299 IPA_CMD_DELVLAN);
300 else
301 qeth_l2_send_setdelvlan(card, id->vid,
302 IPA_CMD_SETVLAN);
303 } 300 }
304 spin_unlock_bh(&card->vlanlock); 301 spin_unlock_bh(&card->vlanlock);
305} 302}
@@ -310,6 +307,8 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
310 struct qeth_vlan_vid *id; 307 struct qeth_vlan_vid *id;
311 308
312 QETH_CARD_TEXT_(card, 4, "aid:%d", vid); 309 QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
310 if (!vid)
311 return;
313 if (card->info.type == QETH_CARD_TYPE_OSM) { 312 if (card->info.type == QETH_CARD_TYPE_OSM) {
314 QETH_CARD_TEXT(card, 3, "aidOSM"); 313 QETH_CARD_TEXT(card, 3, "aidOSM");
315 return; 314 return;
@@ -377,19 +376,11 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
377 dev_close(card->dev); 376 dev_close(card->dev);
378 rtnl_unlock(); 377 rtnl_unlock();
379 } 378 }
380 if (!card->use_hard_stop || 379 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
381 recovery_mode) {
382 __u8 *mac = &card->dev->dev_addr[0];
383 rc = qeth_l2_send_delmac(card, mac);
384 QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc);
385 }
386 card->state = CARD_STATE_SOFTSETUP; 380 card->state = CARD_STATE_SOFTSETUP;
387 } 381 }
388 if (card->state == CARD_STATE_SOFTSETUP) { 382 if (card->state == CARD_STATE_SOFTSETUP) {
389 qeth_l2_process_vlans(card, 1); 383 qeth_l2_del_all_mc(card, 0);
390 if (!card->use_hard_stop ||
391 recovery_mode)
392 qeth_l2_del_all_mc(card);
393 qeth_clear_ipacmd_list(card); 384 qeth_clear_ipacmd_list(card);
394 card->state = CARD_STATE_HARDSETUP; 385 card->state = CARD_STATE_HARDSETUP;
395 } 386 }
@@ -403,45 +394,37 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
403 qeth_clear_cmd_buffers(&card->read); 394 qeth_clear_cmd_buffers(&card->read);
404 qeth_clear_cmd_buffers(&card->write); 395 qeth_clear_cmd_buffers(&card->write);
405 } 396 }
406 card->use_hard_stop = 0;
407 return rc; 397 return rc;
408} 398}
409 399
410static void qeth_l2_process_inbound_buffer(struct qeth_card *card, 400static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
411 struct qeth_qdio_buffer *buf, int index) 401 int budget, int *done)
412{ 402{
413 struct qdio_buffer_element *element; 403 int work_done = 0;
414 struct sk_buff *skb; 404 struct sk_buff *skb;
415 struct qeth_hdr *hdr; 405 struct qeth_hdr *hdr;
416 int offset;
417 unsigned int len; 406 unsigned int len;
418 407
419 /* get first element of current buffer */ 408 *done = 0;
420 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 409 BUG_ON(!budget);
421 offset = 0; 410 while (budget) {
422 if (card->options.performance_stats) 411 skb = qeth_core_get_next_skb(card,
423 card->perf_stats.bufs_rec++; 412 card->qdio.in_q->bufs[card->rx.b_index].buffer,
424 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, 413 &card->rx.b_element, &card->rx.e_offset, &hdr);
425 &offset, &hdr))) { 414 if (!skb) {
426 skb->dev = card->dev; 415 *done = 1;
427 /* is device UP ? */ 416 break;
428 if (!(card->dev->flags & IFF_UP)) {
429 dev_kfree_skb_any(skb);
430 continue;
431 } 417 }
432 418 skb->dev = card->dev;
433 switch (hdr->hdr.l2.id) { 419 switch (hdr->hdr.l2.id) {
434 case QETH_HEADER_TYPE_LAYER2: 420 case QETH_HEADER_TYPE_LAYER2:
435 skb->pkt_type = PACKET_HOST; 421 skb->pkt_type = PACKET_HOST;
436 skb->protocol = eth_type_trans(skb, skb->dev); 422 skb->protocol = eth_type_trans(skb, skb->dev);
437 if (card->options.checksum_type == NO_CHECKSUMMING) 423 skb->ip_summed = CHECKSUM_NONE;
438 skb->ip_summed = CHECKSUM_UNNECESSARY;
439 else
440 skb->ip_summed = CHECKSUM_NONE;
441 if (skb->protocol == htons(ETH_P_802_2)) 424 if (skb->protocol == htons(ETH_P_802_2))
442 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 425 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
443 len = skb->len; 426 len = skb->len;
444 netif_rx(skb); 427 netif_receive_skb(skb);
445 break; 428 break;
446 case QETH_HEADER_TYPE_OSN: 429 case QETH_HEADER_TYPE_OSN:
447 if (card->info.type == QETH_CARD_TYPE_OSN) { 430 if (card->info.type == QETH_CARD_TYPE_OSN) {
@@ -459,9 +442,87 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
 			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
 			continue;
 		}
+		work_done++;
+		budget--;
 		card->stats.rx_packets++;
 		card->stats.rx_bytes += len;
 	}
+	return work_done;
+}
+
+static int qeth_l2_poll(struct napi_struct *napi, int budget)
+{
+	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+	int work_done = 0;
+	struct qeth_qdio_buffer *buffer;
+	int done;
+	int new_budget = budget;
+
+	if (card->options.performance_stats) {
+		card->perf_stats.inbound_cnt++;
+		card->perf_stats.inbound_start_time = qeth_get_micros();
+	}
+
+	while (1) {
+		if (!card->rx.b_count) {
+			card->rx.qdio_err = 0;
+			card->rx.b_count = qdio_get_next_buffers(
+				card->data.ccwdev, 0, &card->rx.b_index,
+				&card->rx.qdio_err);
+			if (card->rx.b_count <= 0) {
+				card->rx.b_count = 0;
+				break;
+			}
+			card->rx.b_element =
+				&card->qdio.in_q->bufs[card->rx.b_index]
+				.buffer->element[0];
+			card->rx.e_offset = 0;
+		}
+
+		while (card->rx.b_count) {
+			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+			if (!(card->rx.qdio_err &&
+			    qeth_check_qdio_errors(card, buffer->buffer,
+			    card->rx.qdio_err, "qinerr")))
+				work_done += qeth_l2_process_inbound_buffer(
+					card, new_budget, &done);
+			else
+				done = 1;
+
+			if (done) {
+				if (card->options.performance_stats)
+					card->perf_stats.bufs_rec++;
+				qeth_put_buffer_pool_entry(card,
+					buffer->pool_entry);
+				qeth_queue_input_buffer(card, card->rx.b_index);
+				card->rx.b_count--;
+				if (card->rx.b_count) {
+					card->rx.b_index =
+						(card->rx.b_index + 1) %
+						QDIO_MAX_BUFFERS_PER_Q;
+					card->rx.b_element =
+						&card->qdio.in_q
+						->bufs[card->rx.b_index]
+						.buffer->element[0];
+					card->rx.e_offset = 0;
+				}
+			}
+
+			if (work_done >= budget)
+				goto out;
+			else
+				new_budget = budget - work_done;
+		}
+	}
+
+	napi_complete(napi);
+	if (qdio_start_irq(card->data.ccwdev, 0))
+		napi_schedule(&card->napi);
+out:
+	if (card->options.performance_stats)
+		card->perf_stats.inbound_time += qeth_get_micros() -
+			card->perf_stats.inbound_start_time;
+	return work_done;
 }
 
 static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
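For readers new to NAPI: the poll routine added above follows the standard NAPI contract, sketched below with illustrative names (my_dev and my_poll are not qeth identifiers; the four-argument netif_napi_add matches this kernel generation).

#include <linux/netdevice.h>

struct my_dev {
	struct net_device *ndev;
	struct napi_struct napi;
};

/* Poll up to "budget" packets with the device's RX interrupt disabled. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_dev *priv = container_of(napi, struct my_dev, napi);
	int work_done = 0;

	/* ... pass received skbs up via napi_gro_receive(napi, skb),
	 * incrementing work_done but never beyond budget ... */

	if (work_done < budget) {
		/* ring drained: leave polled mode */
		napi_complete(napi);
		/* re-enable the RX interrupt here; if new work raced in,
		 * call napi_schedule(napi) to keep polling */
	}
	return work_done;
}

/* Registration at probe time (weight 64 is the conventional default):
 *	netif_napi_add(priv->ndev, &priv->napi, my_poll, 64);
 */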
@@ -497,13 +558,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
 	case IPA_RC_L2_DUP_LAYER3_MAC:
 		dev_warn(&card->gdev->dev,
 			"MAC address %pM already exists\n",
-			card->dev->dev_addr);
+			cmd->data.setdelmac.mac);
 		break;
 	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
 	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
 		dev_warn(&card->gdev->dev,
 			"MAC address %pM is not authorized\n",
-			card->dev->dev_addr);
+			cmd->data.setdelmac.mac);
 		break;
 	default:
 		break;
@@ -629,7 +690,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
 	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
 	    (card->state != CARD_STATE_UP))
 		return;
-	qeth_l2_del_all_mc(card);
+	qeth_l2_del_all_mc(card, 1);
 	spin_lock_bh(&card->mclock);
 	netdev_for_each_mc_addr(ha, dev)
 		qeth_l2_add_mc(card, ha->addr, 0);
@@ -755,51 +816,14 @@ tx_drop:
 	return NETDEV_TX_OK;
 }
 
-static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
-			unsigned int qdio_err, unsigned int queue,
-			int first_element, int count, unsigned long card_ptr)
-{
-	struct net_device *net_dev;
-	struct qeth_card *card;
-	struct qeth_qdio_buffer *buffer;
-	int index;
-	int i;
-
-	card = (struct qeth_card *) card_ptr;
-	net_dev = card->dev;
-	if (card->options.performance_stats) {
-		card->perf_stats.inbound_cnt++;
-		card->perf_stats.inbound_start_time = qeth_get_micros();
-	}
-	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
-		QETH_CARD_TEXT(card, 1, "qdinchk");
-		QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
-				count);
-		QETH_CARD_TEXT_(card, 1, "%04X", queue);
-		qeth_schedule_recovery(card);
-		return;
-	}
-	for (i = first_element; i < (first_element + count); ++i) {
-		index = i % QDIO_MAX_BUFFERS_PER_Q;
-		buffer = &card->qdio.in_q->bufs[index];
-		if (!(qdio_err &&
-		      qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
-					     "qinerr")))
-			qeth_l2_process_inbound_buffer(card, buffer, index);
-		/* clear buffer and give back to hardware */
-		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
-		qeth_queue_input_buffer(card, index);
-	}
-	if (card->options.performance_stats)
-		card->perf_stats.inbound_time += qeth_get_micros() -
-			card->perf_stats.inbound_start_time;
-}
-
-static int qeth_l2_open(struct net_device *dev)
+static int __qeth_l2_open(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
 
 	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
 	if (card->state != CARD_STATE_SOFTSETUP)
 		return -ENODEV;
 
@@ -812,11 +836,25 @@ static int qeth_l2_open(struct net_device *dev)
 	card->state = CARD_STATE_UP;
 	netif_start_queue(dev);
 
-	if (!card->lan_online && netif_carrier_ok(dev))
-		netif_carrier_off(dev);
-	return 0;
+	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
+		napi_enable(&card->napi);
+		napi_schedule(&card->napi);
+	} else
+		rc = -EIO;
+	return rc;
 }
 
+static int qeth_l2_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l2_open(dev);
+}
 
 static int qeth_l2_stop(struct net_device *dev)
 {
@@ -824,8 +862,10 @@ static int qeth_l2_stop(struct net_device *dev)
 
 	QETH_CARD_TEXT(card, 4, "qethstop");
 	netif_tx_disable(dev);
-	if (card->state == CARD_STATE_UP)
+	if (card->state == CARD_STATE_UP) {
 		card->state = CARD_STATE_SOFTSETUP;
+		napi_disable(&card->napi);
+	}
 	return 0;
 }
831 871
@@ -836,8 +876,10 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
 	INIT_LIST_HEAD(&card->vid_list);
 	INIT_LIST_HEAD(&card->mc_list);
 	card->options.layer2 = 1;
+	card->info.hwtrap = 0;
+	card->discipline.start_poll = qeth_qdio_start_poll;
 	card->discipline.input_handler = (qdio_handler_t *)
-		qeth_l2_qdio_input_handler;
+		qeth_qdio_input_handler;
 	card->discipline.output_handler = (qdio_handler_t *)
 		qeth_qdio_output_handler;
 	card->discipline.recover = qeth_l2_recover;
@@ -851,10 +893,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 	qeth_set_allowed_threads(card, 0, 1);
 	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
-	if (cgdev->state == CCWGROUP_ONLINE) {
-		card->use_hard_stop = 1;
+	if (cgdev->state == CCWGROUP_ONLINE)
 		qeth_l2_set_offline(cgdev);
-	}
 
 	if (card->dev) {
 		unregister_netdev(card->dev);
@@ -923,6 +963,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 	card->info.broadcast_capable = 1;
 	qeth_l2_request_initial_mac(card);
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
+	netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
 	return register_netdev(card->dev);
 }
928 969
@@ -954,7 +995,15 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	if (card->info.type != QETH_CARD_TYPE_OSN)
 		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
 
+	if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
+		if (card->info.hwtrap &&
+		    qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
+			card->info.hwtrap = 0;
+	} else
+		card->info.hwtrap = 0;
+
 	card->state = CARD_STATE_HARDSETUP;
+	memset(&card->rx, 0, sizeof(struct qeth_rx));
 	qeth_print_status_message(card);
 
 	/* softsetup */
@@ -967,13 +1016,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			dev_warn(&card->gdev->dev,
 				"The LAN is offline\n");
 			card->lan_online = 0;
-			goto out;
+			goto contin;
 		}
 		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
 
+contin:
 	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
 	    (card->info.type == QETH_CARD_TYPE_OSX))
 		/* configure isolation level */
@@ -981,7 +1031,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 
 	if (card->info.type != QETH_CARD_TYPE_OSN &&
 	    card->info.type != QETH_CARD_TYPE_OSM)
-		qeth_l2_process_vlans(card, 0);
+		qeth_l2_process_vlans(card);
 
 	netif_tx_disable(card->dev);
 
@@ -992,13 +1042,16 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
-	netif_carrier_on(card->dev);
+	if (card->lan_online)
+		netif_carrier_on(card->dev);
+	else
+		netif_carrier_off(card->dev);
 
 	qeth_set_allowed_threads(card, 0xffffffff, 0);
 	if (recover_flag == CARD_STATE_RECOVER) {
 		if (recovery_mode &&
 		    card->info.type != QETH_CARD_TYPE_OSN) {
-			qeth_l2_open(card->dev);
+			__qeth_l2_open(card->dev);
 		} else {
 			rtnl_lock();
 			dev_open(card->dev);
@@ -1009,13 +1062,11 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	}
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
-out:
 	mutex_unlock(&card->conf_mutex);
 	mutex_unlock(&card->discipline_mutex);
 	return 0;
 
 out_remove:
-	card->use_hard_stop = 1;
 	qeth_l2_stop_card(card, 0);
 	ccw_device_set_offline(CARD_DDEV(card));
 	ccw_device_set_offline(CARD_WDEV(card));
@@ -1049,6 +1100,10 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
 	if (card->dev && netif_carrier_ok(card->dev))
 		netif_carrier_off(card->dev);
 	recover_flag = card->state;
+	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		card->info.hwtrap = 1;
+	}
 	qeth_l2_stop_card(card, recovery_mode);
 	rc = ccw_device_set_offline(CARD_DDEV(card));
 	rc2 = ccw_device_set_offline(CARD_WDEV(card));
@@ -1083,12 +1138,8 @@ static int qeth_l2_recover(void *ptr)
 	QETH_CARD_TEXT(card, 2, "recover2");
 	dev_warn(&card->gdev->dev,
 		"A recovery process has been started for the device\n");
-	card->use_hard_stop = 1;
 	__qeth_l2_set_offline(card->gdev, 1);
 	rc = __qeth_l2_set_online(card->gdev, 1);
-	/* don't run another scheduled recovery */
-	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
-	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
 	if (!rc)
 		dev_info(&card->gdev->dev,
 			"Device successfully recovered!\n");
@@ -1099,6 +1150,8 @@ static int qeth_l2_recover(void *ptr)
 		dev_warn(&card->gdev->dev, "The qeth device driver "
 			"failed to recover an error on the device\n");
 	}
+	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
 	return 0;
 }
 
@@ -1116,6 +1169,8 @@ static void __exit qeth_l2_exit(void)
 static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 	qeth_qdio_clear_card(card, 0);
 	qeth_clear_qdio_buffers(card);
 }
@@ -1131,7 +1186,8 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
 	if (gdev->state == CCWGROUP_OFFLINE)
 		return 0;
 	if (card->state == CARD_STATE_UP) {
-		card->use_hard_stop = 1;
+		if (card->info.hwtrap)
+			qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 		__qeth_l2_set_offline(card->gdev, 1);
 	} else
 		__qeth_l2_set_offline(card->gdev, 0);
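The ndo_open/ndo_stop changes above pair napi_enable() with napi_disable(); a minimal sketch of that lifecycle (illustrative names, not qeth code):

static int my_open(struct net_device *dev)
{
	struct my_dev *priv = netdev_priv(dev);

	netif_start_queue(dev);
	napi_enable(&priv->napi);	/* must precede any napi_schedule() */
	napi_schedule(&priv->napi);	/* drain anything hw queued while closed */
	return 0;
}

static int my_stop(struct net_device *dev)
{
	struct my_dev *priv = netdev_priv(dev);

	netif_tx_disable(dev);
	napi_disable(&priv->napi);	/* blocks until an in-flight poll finishes */
	return 0;
}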
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index e705b27ec7dc..14a43aeb0c2a 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -62,8 +62,6 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
 			const u8 *);
-int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
-int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
 int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
 
 #endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e22ae248f613..fd69da3fa6b4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -30,6 +30,7 @@
 
 #include "qeth_l3.h"
 
+
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_recover(void *);
 static int qeth_l3_stop(struct net_device *);
@@ -42,33 +43,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
 static int __qeth_l3_set_online(struct ccwgroup_device *, int);
 static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
 
-int qeth_l3_set_large_send(struct qeth_card *card,
-		enum qeth_large_send_types type)
-{
-	int rc = 0;
-
-	card->options.large_send = type;
-	if (card->dev == NULL)
-		return 0;
-
-	if (card->options.large_send == QETH_LARGE_SEND_TSO) {
-		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
-			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
-						NETIF_F_IP_CSUM;
-		} else {
-			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
-						NETIF_F_IP_CSUM);
-			card->options.large_send = QETH_LARGE_SEND_NO;
-			rc = -EOPNOTSUPP;
-		}
-	} else {
-		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
-					NETIF_F_IP_CSUM);
-		card->options.large_send = QETH_LARGE_SEND_NO;
-	}
-	return rc;
-}
-
 static int qeth_l3_isxdigit(char *buf)
 {
 	while (*buf) {
@@ -103,12 +77,7 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
 
 void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
 {
-	sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
-		     ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
-		     addr[0], addr[1], addr[2], addr[3],
-		     addr[4], addr[5], addr[6], addr[7],
-		     addr[8], addr[9], addr[10], addr[11],
-		     addr[12], addr[13], addr[14], addr[15]);
+	sprintf(buf, "%pI6", addr);
 }
 
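The %pI6 conversion relies on the kernel's pointer-extension printf formats: given a pointer to a 16-byte address buffer it emits the full colon-separated form (39 characters plus NUL), so the caller's buffer must hold at least 40 bytes. For example:

__u8 addr[16] = { 0xfe, 0x80, /* ...remaining 14 bytes zero-filled... */ };
char buf[40];	/* 8 groups * 4 hex digits + 7 colons + NUL */

sprintf(buf, "%pI6", addr);	/* e.g. "fe80:0000:0000:...:0000" */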
114int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) 83int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
@@ -460,8 +429,11 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
460 QETH_CARD_TEXT(card, 2, "sdiplist"); 429 QETH_CARD_TEXT(card, 2, "sdiplist");
461 QETH_CARD_HEX(card, 2, &card, sizeof(void *)); 430 QETH_CARD_HEX(card, 2, &card, sizeof(void *));
462 431
463 if (card->options.sniffer) 432 if ((card->state != CARD_STATE_UP &&
433 card->state != CARD_STATE_SOFTSETUP) || card->options.sniffer) {
464 return; 434 return;
435 }
436
465 spin_lock_irqsave(&card->ip_lock, flags); 437 spin_lock_irqsave(&card->ip_lock, flags);
466 tbd_list = card->ip_tbd_list; 438 tbd_list = card->ip_tbd_list;
467 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); 439 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -511,8 +483,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
 	kfree(tbd_list);
 }
 
-static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
-					int recover)
+static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
 {
 	struct qeth_ipaddr *addr, *tmp;
 	unsigned long flags;
@@ -531,11 +502,6 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
 		addr = list_entry(card->ip_list.next,
 				  struct qeth_ipaddr, entry);
 		list_del_init(&addr->entry);
-		if (clean) {
-			spin_unlock_irqrestore(&card->ip_lock, flags);
-			qeth_l3_deregister_addr_entry(card, addr);
-			spin_lock_irqsave(&card->ip_lock, flags);
-		}
 		if (!recover || addr->is_multicast) {
 			kfree(addr);
 			continue;
@@ -1311,39 +1277,6 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
 	return rc;
 }
 
-static int qeth_l3_query_ipassists_cb(struct qeth_card *card,
-		struct qeth_reply *reply, unsigned long data)
-{
-	struct qeth_ipa_cmd *cmd;
-
-	QETH_DBF_TEXT(SETUP, 2, "qipasscb");
-
-	cmd = (struct qeth_ipa_cmd *) data;
-	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
-		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
-		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
-	} else {
-		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
-		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
-	}
-	QETH_DBF_TEXT(SETUP, 2, "suppenbl");
-	QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
-	QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
-	return 0;
-}
-
-static int qeth_l3_query_ipassists(struct qeth_card *card,
-				enum qeth_prot_versions prot)
-{
-	int rc;
-	struct qeth_cmd_buffer *iob;
-
-	QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
-	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
-	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL);
-	return rc;
-}
-
 #ifdef CONFIG_QETH_IPV6
 static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
 {
@@ -1354,7 +1287,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
 	if (card->info.type == QETH_CARD_TYPE_IQD)
 		goto out;
 
-	rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6);
+	rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
 	if (rc) {
 		dev_err(&card->gdev->dev,
 			"Activating IPv6 support for %s failed\n",
@@ -1479,68 +1412,38 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
 	return 0;
 }
 
-int qeth_l3_set_rx_csum(struct qeth_card *card,
-	enum qeth_checksum_types csum_type)
+int qeth_l3_set_rx_csum(struct qeth_card *card, int on)
 {
 	int rc = 0;
 
-	if (card->options.checksum_type == HW_CHECKSUMMING) {
-		if ((csum_type != HW_CHECKSUMMING) &&
-		    (card->state != CARD_STATE_DOWN)) {
-			rc = qeth_l3_send_simple_setassparms(card,
-				IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
-			if (rc)
-				return -EIO;
-		}
+	if (on) {
+		rc = qeth_l3_send_checksum_command(card);
+		if (rc)
+			return -EIO;
+		dev_info(&card->gdev->dev,
+			"HW Checksumming (inbound) enabled\n");
 	} else {
-		if (csum_type == HW_CHECKSUMMING) {
-			if (card->state != CARD_STATE_DOWN) {
-				if (!qeth_is_supported(card,
-				    IPA_INBOUND_CHECKSUM))
-					return -EPERM;
-				rc = qeth_l3_send_checksum_command(card);
-				if (rc)
-					return -EIO;
-			}
-		}
+		rc = qeth_l3_send_simple_setassparms(card,
+			IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
+		if (rc)
+			return -EIO;
 	}
-	card->options.checksum_type = csum_type;
-	return rc;
+
+	return 0;
 }
 
 static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
 {
-	int rc = 0;
-
 	QETH_CARD_TEXT(card, 3, "strtcsum");
 
-	if (card->options.checksum_type == NO_CHECKSUMMING) {
-		dev_info(&card->gdev->dev,
-			"Using no checksumming on %s.\n",
-			QETH_CARD_IFNAME(card));
-		return 0;
-	}
-	if (card->options.checksum_type == SW_CHECKSUMMING) {
-		dev_info(&card->gdev->dev,
-			"Using SW checksumming on %s.\n",
-			QETH_CARD_IFNAME(card));
-		return 0;
-	}
-	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
-		dev_info(&card->gdev->dev,
-			"Inbound HW Checksumming not "
-			"supported on %s,\ncontinuing "
-			"using Inbound SW Checksumming\n",
-			QETH_CARD_IFNAME(card));
-		card->options.checksum_type = SW_CHECKSUMMING;
-		return 0;
+	if (card->dev->features & NETIF_F_RXCSUM) {
+		rtnl_lock();
+		/* force set_features call */
+		card->dev->features &= ~NETIF_F_RXCSUM;
+		netdev_update_features(card->dev);
+		rtnl_unlock();
 	}
-	rc = qeth_l3_send_checksum_command(card);
-	if (!rc)
-		dev_info(&card->gdev->dev,
-			"HW Checksumming (inbound) enabled\n");
-
-	return rc;
+	return 0;
 }
 
 static int qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
@@ -1587,10 +1490,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
 		dev_info(&card->gdev->dev,
 			"Outbound TSO enabled\n");
 	}
-	if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) {
-		card->options.large_send = QETH_LARGE_SEND_NO;
-		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
-	}
+	if (rc)
+		card->dev->features &= ~NETIF_F_TSO;
 	return rc;
 }
1596 1497
@@ -1612,29 +1513,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
1612 return 0; 1513 return 0;
1613} 1514}
1614 1515
1615static int qeth_l3_put_unique_id(struct qeth_card *card)
1616{
1617
1618 int rc = 0;
1619 struct qeth_cmd_buffer *iob;
1620 struct qeth_ipa_cmd *cmd;
1621
1622 QETH_CARD_TEXT(card, 2, "puniqeid");
1623
1624 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
1625 UNIQUE_ID_NOT_BY_CARD)
1626 return -1;
1627 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR,
1628 QETH_PROT_IPV6);
1629 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1630 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
1631 card->info.unique_id;
1632 memcpy(&cmd->data.create_destroy_addr.unique_id[0],
1633 card->dev->dev_addr, OSA_ADDR_LEN);
1634 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
1635 return rc;
1636}
1637
1638static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, 1516static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
1639 struct qeth_reply *reply, unsigned long data) 1517 struct qeth_reply *reply, unsigned long data)
1640{ 1518{
@@ -1801,7 +1679,8 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
 	char buf[MAX_ADDR_LEN];
 
 	QETH_CARD_TEXT(card, 4, "addmc");
-	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
+	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rcu_dereference(im4->next_rcu)) {
 		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
 		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 		if (!ipm)
@@ -1825,7 +1704,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
 		return;
 
 	vg = card->vlangrp;
-	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+	for (i = 0; i < VLAN_N_VID; i++) {
 		struct net_device *netdev = vlan_group_get_device(vg, i);
 		if (netdev == NULL ||
 		    !(netdev->flags & IFF_UP))
@@ -1833,9 +1712,9 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
 		in_dev = in_dev_get(netdev);
 		if (!in_dev)
 			continue;
-		read_lock(&in_dev->mc_list_lock);
+		rcu_read_lock();
 		qeth_l3_add_mc(card, in_dev);
-		read_unlock(&in_dev->mc_list_lock);
+		rcu_read_unlock();
 		in_dev_put(in_dev);
 	}
 }
@@ -1848,10 +1727,10 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
 	in4_dev = in_dev_get(card->dev);
 	if (in4_dev == NULL)
 		return;
-	read_lock(&in4_dev->mc_list_lock);
+	rcu_read_lock();
 	qeth_l3_add_mc(card, in4_dev);
 	qeth_l3_add_vlan_mc(card);
-	read_unlock(&in4_dev->mc_list_lock);
+	rcu_read_unlock();
 	in_dev_put(in4_dev);
 }
 
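The locking change above follows the network stack's conversion of in_device->mc_list from an rwlock to RCU; the reader-side pattern, in general form:

rcu_read_lock();	/* readers take no lock, only an RCU read section */
for (im = rcu_dereference(in_dev->mc_list); im != NULL;
     im = rcu_dereference(im->next_rcu)) {
	/* read-only use of *im; no sleeping inside the RCU section */
}
rcu_read_unlock();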
@@ -1888,7 +1767,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
 		return;
 
 	vg = card->vlangrp;
-	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+	for (i = 0; i < VLAN_N_VID; i++) {
 		struct net_device *netdev = vlan_group_get_device(vg, i);
 		if (netdev == NULL ||
 		    !(netdev->flags & IFF_UP))
@@ -2018,13 +1897,14 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	qeth_l3_set_multicast_list(card->dev);
 }
 
-static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
-	struct sk_buff *skb, struct qeth_hdr *hdr)
+static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
+	struct sk_buff *skb, struct qeth_hdr *hdr,
+	unsigned short *vlan_id)
 {
-	unsigned short vlan_id = 0;
 	__be16 prot;
 	struct iphdr *ip_hdr;
 	unsigned char tg_addr[MAX_ADDR_LEN];
+	int is_vlan = 0;
 
 	if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
 		prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
@@ -2087,18 +1967,12 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
 
 	if (hdr->hdr.l3.ext_flags &
 	    (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
-		vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
+		*vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
 		 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+		is_vlan = 1;
 	}
 
-	switch (card->options.checksum_type) {
-	case SW_CHECKSUMMING:
-		skb->ip_summed = CHECKSUM_NONE;
-		break;
-	case NO_CHECKSUMMING:
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		break;
-	case HW_CHECKSUMMING:
+	if (card->dev->features & NETIF_F_RXCSUM) {
 		if ((hdr->hdr.l3.ext_flags &
 		      (QETH_HDR_EXT_CSUM_HDR_REQ |
 		       QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
@@ -2107,56 +1981,47 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
 			skb->ip_summed = CHECKSUM_NONE;
-	}
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
 
-	return vlan_id;
+	return is_vlan;
 }
 
-static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
-			    struct qeth_qdio_buffer *buf, int index)
+static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
+				int budget, int *done)
 {
-	struct qdio_buffer_element *element;
+	int work_done = 0;
 	struct sk_buff *skb;
 	struct qeth_hdr *hdr;
-	int offset;
 	__u16 vlan_tag = 0;
+	int is_vlan;
 	unsigned int len;
-	/* get first element of current buffer */
-	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
-	offset = 0;
-	if (card->options.performance_stats)
-		card->perf_stats.bufs_rec++;
-	while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
-				       &offset, &hdr))) {
-		skb->dev = card->dev;
-		/* is device UP ? */
-		if (!(card->dev->flags & IFF_UP)) {
-			dev_kfree_skb_any(skb);
-			continue;
-		}
 
+	*done = 0;
+	BUG_ON(!budget);
+	while (budget) {
+		skb = qeth_core_get_next_skb(card,
+			card->qdio.in_q->bufs[card->rx.b_index].buffer,
+			&card->rx.b_element, &card->rx.e_offset, &hdr);
+		if (!skb) {
+			*done = 1;
+			break;
+		}
+		skb->dev = card->dev;
 		switch (hdr->hdr.l3.id) {
 		case QETH_HEADER_TYPE_LAYER3:
-			vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
+			is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
+						      &vlan_tag);
 			len = skb->len;
-			if (vlan_tag && !card->options.sniffer)
-				if (card->vlangrp)
-					vlan_hwaccel_rx(skb, card->vlangrp,
-						vlan_tag);
-				else {
-					dev_kfree_skb_any(skb);
-					continue;
-				}
+			if (is_vlan && !card->options.sniffer)
+				vlan_gro_receive(&card->napi, card->vlangrp,
+					vlan_tag, skb);
 			else
-				netif_rx(skb);
+				napi_gro_receive(&card->napi, skb);
 			break;
 		case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
 			skb->pkt_type = PACKET_HOST;
 			skb->protocol = eth_type_trans(skb, skb->dev);
-			if (card->options.checksum_type == NO_CHECKSUMMING)
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-			else
-				skb->ip_summed = CHECKSUM_NONE;
 			len = skb->len;
 			netif_receive_skb(skb);
 			break;
@@ -2166,10 +2031,87 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
 			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
 			continue;
 		}
-
+		work_done++;
+		budget--;
 		card->stats.rx_packets++;
 		card->stats.rx_bytes += len;
 	}
+	return work_done;
+}
+
+static int qeth_l3_poll(struct napi_struct *napi, int budget)
+{
+	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+	int work_done = 0;
+	struct qeth_qdio_buffer *buffer;
+	int done;
+	int new_budget = budget;
+
+	if (card->options.performance_stats) {
+		card->perf_stats.inbound_cnt++;
+		card->perf_stats.inbound_start_time = qeth_get_micros();
+	}
+
+	while (1) {
+		if (!card->rx.b_count) {
+			card->rx.qdio_err = 0;
+			card->rx.b_count = qdio_get_next_buffers(
+				card->data.ccwdev, 0, &card->rx.b_index,
+				&card->rx.qdio_err);
+			if (card->rx.b_count <= 0) {
+				card->rx.b_count = 0;
+				break;
+			}
+			card->rx.b_element =
+				&card->qdio.in_q->bufs[card->rx.b_index]
+				.buffer->element[0];
+			card->rx.e_offset = 0;
+		}
+
+		while (card->rx.b_count) {
+			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+			if (!(card->rx.qdio_err &&
+			    qeth_check_qdio_errors(card, buffer->buffer,
+			    card->rx.qdio_err, "qinerr")))
+				work_done += qeth_l3_process_inbound_buffer(
+					card, new_budget, &done);
+			else
+				done = 1;
+
+			if (done) {
+				if (card->options.performance_stats)
+					card->perf_stats.bufs_rec++;
+				qeth_put_buffer_pool_entry(card,
+					buffer->pool_entry);
+				qeth_queue_input_buffer(card, card->rx.b_index);
+				card->rx.b_count--;
+				if (card->rx.b_count) {
+					card->rx.b_index =
+						(card->rx.b_index + 1) %
+						QDIO_MAX_BUFFERS_PER_Q;
+					card->rx.b_element =
+						&card->qdio.in_q
+						->bufs[card->rx.b_index]
+						.buffer->element[0];
+					card->rx.e_offset = 0;
+				}
+			}
+
+			if (work_done >= budget)
+				goto out;
+			else
+				new_budget = budget - work_done;
+		}
+	}
+
+	napi_complete(napi);
+	if (qdio_start_irq(card->data.ccwdev, 0))
+		napi_schedule(&card->napi);
+out:
+	if (card->options.performance_stats)
+		card->perf_stats.inbound_time += qeth_get_micros() -
+			card->perf_stats.inbound_start_time;
+	return work_done;
 }
 
 static int qeth_l3_verify_vlan_dev(struct net_device *dev,
@@ -2183,7 +2125,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
 	if (!vg)
 		return rc;
 
-	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+	for (i = 0; i < VLAN_N_VID; i++) {
 		if (vlan_group_get_device(vg, i) == dev) {
 			rc = QETH_VLAN_CARD;
 			break;
@@ -2255,25 +2197,14 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
 			dev_close(card->dev);
 			rtnl_unlock();
 		}
-		if (!card->use_hard_stop) {
-			rc = qeth_send_stoplan(card);
-			if (rc)
-				QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		}
 		card->state = CARD_STATE_SOFTSETUP;
 	}
 	if (card->state == CARD_STATE_SOFTSETUP) {
-		qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1);
+		qeth_l3_clear_ip_list(card, 1);
 		qeth_clear_ipacmd_list(card);
 		card->state = CARD_STATE_HARDSETUP;
 	}
 	if (card->state == CARD_STATE_HARDSETUP) {
-		if (!card->use_hard_stop &&
-		    (card->info.type != QETH_CARD_TYPE_IQD)) {
-			rc = qeth_l3_put_unique_id(card);
-			if (rc)
-				QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
-		}
 		qeth_qdio_clear_card(card, 0);
 		qeth_clear_qdio_buffers(card);
 		qeth_clear_working_pool_list(card);
@@ -2283,7 +2214,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
 		qeth_clear_cmd_buffers(&card->read);
 		qeth_clear_cmd_buffers(&card->write);
 	}
-	card->use_hard_stop = 0;
 	return rc;
 }
 
@@ -2390,22 +2320,46 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
 	return rc;
 }
 
-static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
-		struct qeth_arp_query_data *qdata, int entry_size,
-		int uentry_size)
+static __u32 get_arp_entry_size(struct qeth_card *card,
+			struct qeth_arp_query_data *qdata,
+			struct qeth_arp_entrytype *type, __u8 strip_entries)
 {
-	char *entry_ptr;
-	char *uentry_ptr;
-	int i;
+	__u32 rc;
+	__u8 is_hsi;
 
-	entry_ptr = (char *)&qdata->data;
-	uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
-	for (i = 0; i < qdata->no_entries; ++i) {
-		/* strip off 32 bytes "media specific information" */
-		memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
-		entry_ptr += entry_size;
-		uentry_ptr += uentry_size;
+	is_hsi = qdata->reply_bits == 5;
+	if (type->ip == QETHARP_IP_ADDR_V4) {
+		QETH_CARD_TEXT(card, 4, "arpev4");
+		if (strip_entries) {
+			rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
+				sizeof(struct qeth_arp_qi_entry7_short);
+		} else {
+			rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
+				sizeof(struct qeth_arp_qi_entry7);
+		}
+	} else if (type->ip == QETHARP_IP_ADDR_V6) {
+		QETH_CARD_TEXT(card, 4, "arpev6");
+		if (strip_entries) {
+			rc = is_hsi ?
+				sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
+				sizeof(struct qeth_arp_qi_entry7_short_ipv6);
+		} else {
+			rc = is_hsi ?
+				sizeof(struct qeth_arp_qi_entry5_ipv6) :
+				sizeof(struct qeth_arp_qi_entry7_ipv6);
+		}
+	} else {
+		QETH_CARD_TEXT(card, 4, "arpinv");
+		rc = 0;
 	}
+
+	return rc;
+}
+
+static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
+{
+	return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
+		(type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
 }
 
 static int qeth_l3_arp_query_cb(struct qeth_card *card,
@@ -2414,72 +2368,77 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_arp_query_data *qdata;
 	struct qeth_arp_query_info *qinfo;
-	int entry_size;
-	int uentry_size;
 	int i;
+	int e;
+	int entrybytes_done;
+	int stripped_bytes;
+	__u8 do_strip_entries;
 
-	QETH_CARD_TEXT(card, 4, "arpquecb");
+	QETH_CARD_TEXT(card, 3, "arpquecb");
 
 	qinfo = (struct qeth_arp_query_info *) reply->param;
 	cmd = (struct qeth_ipa_cmd *) data;
+	QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
 	if (cmd->hdr.return_code) {
-		QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT(card, 4, "arpcberr");
+		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
 		return 0;
 	}
 	if (cmd->data.setassparms.hdr.return_code) {
 		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
-		QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT(card, 4, "setaperr");
+		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
 		return 0;
 	}
 	qdata = &cmd->data.setassparms.data.query_arp;
-	switch (qdata->reply_bits) {
-	case 5:
-		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
-		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
-			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
-		break;
-	case 7:
-		/* fall through to default */
-	default:
-		/* tr is the same as eth -> entry7 */
-		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
-		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
-			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
-		break;
-	}
-	/* check if there is enough room in userspace */
-	if ((qinfo->udata_len - qinfo->udata_offset) <
-		qdata->no_entries * uentry_size){
-		QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
-		cmd->hdr.return_code = -ENOMEM;
-		goto out_error;
-	}
-	QETH_CARD_TEXT_(card, 4, "anore%i",
-		cmd->data.setassparms.hdr.number_of_replies);
-	QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
 	QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
 
-	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
-		/* strip off "media specific information" */
-		qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size,
-					       uentry_size);
-	} else
-		/*copy entries to user buffer*/
-		memcpy(qinfo->udata + qinfo->udata_offset,
-		       (char *)&qdata->data, qdata->no_entries*uentry_size);
+	do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
+	stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
+	entrybytes_done = 0;
+	for (e = 0; e < qdata->no_entries; ++e) {
+		char *cur_entry;
+		__u32 esize;
+		struct qeth_arp_entrytype *etype;
+
+		cur_entry = &qdata->data + entrybytes_done;
+		etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
+		if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
+			QETH_CARD_TEXT(card, 4, "pmis");
+			QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
+			break;
+		}
+		esize = get_arp_entry_size(card, qdata, etype,
+			do_strip_entries);
+		QETH_CARD_TEXT_(card, 5, "esz%i", esize);
+		if (!esize)
+			break;
+
+		if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
+			QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
+			cmd->hdr.return_code = -ENOMEM;
+			goto out_error;
+		}
 
-	qinfo->no_entries += qdata->no_entries;
-	qinfo->udata_offset += (qdata->no_entries*uentry_size);
+		memcpy(qinfo->udata + qinfo->udata_offset,
+		       &qdata->data + entrybytes_done + stripped_bytes,
+		       esize);
+		entrybytes_done += esize + stripped_bytes;
+		qinfo->udata_offset += esize;
+		++qinfo->no_entries;
+	}
 	/* check if all replies received ... */
 	if (cmd->data.setassparms.hdr.seq_no <
 	    cmd->data.setassparms.hdr.number_of_replies)
 		return 1;
+	QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
 	memcpy(qinfo->udata, &qinfo->no_entries, 4);
 	/* keep STRIP_ENTRIES flag so the user program can distinguish
 	 * stripped entries from normal ones */
 	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
 		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
 	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
+	QETH_CARD_TEXT_(card, 4, "rc%i", 0);
 	return 0;
 out_error:
 	i = 0;
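The rewritten callback treats the reply as a packed sequence of variable-size records rather than assuming one entry size per reply, which is what makes mixed IPv4/IPv6 queries possible. The general shape of such a walk (a sketch; entry_hdr and entry_size are illustrative, not qeth structures):

int off = 0;
int i;

for (i = 0; i < n_entries; i++) {
	struct entry_hdr *e = (struct entry_hdr *)(data + off);
	__u32 esize = entry_size(e);	/* size derived per entry; 0 = bad type */

	if (!esize || off + esize > data_len)
		break;			/* stop rather than overrun the buffer */
	/* ... copy esize bytes out to the user buffer ... */
	off += esize;
}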
@@ -2502,45 +2461,86 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
 				reply_cb, reply_param);
 }
 
-static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
+static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
+	enum qeth_prot_versions prot,
+	struct qeth_arp_query_info *qinfo)
 {
 	struct qeth_cmd_buffer *iob;
-	struct qeth_arp_query_info qinfo = {0, };
+	struct qeth_ipa_cmd *cmd;
 	int tmp;
 	int rc;
 
+	QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
+
+	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+			IPA_CMD_ASS_ARP_QUERY_INFO,
+			sizeof(struct qeth_arp_query_data) - sizeof(char),
+			prot);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
+	cmd->data.setassparms.data.query_arp.reply_bits = 0;
+	cmd->data.setassparms.data.query_arp.no_entries = 0;
+	rc = qeth_l3_send_ipa_arp_cmd(card, iob,
+			   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
+			   qeth_l3_arp_query_cb, (void *)qinfo);
+	if (rc) {
+		tmp = rc;
+		QETH_DBF_MESSAGE(2,
+			"Error while querying ARP cache on %s: %s "
+			"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+
+	return rc;
+}
+
+static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
+{
+	struct qeth_arp_query_info qinfo = {0, };
+	int rc;
+
 	QETH_CARD_TEXT(card, 3, "arpquery");
 
 	if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
 			       IPA_ARP_PROCESSING)) {
-		return -EOPNOTSUPP;
+		QETH_CARD_TEXT(card, 3, "arpqnsup");
+		rc = -EOPNOTSUPP;
+		goto out;
 	}
 	/* get size of userspace buffer and mask_bits -> 6 bytes */
-	if (copy_from_user(&qinfo, udata, 6))
-		return -EFAULT;
+	if (copy_from_user(&qinfo, udata, 6)) {
+		rc = -EFAULT;
+		goto out;
+	}
 	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
-	if (!qinfo.udata)
-		return -ENOMEM;
+	if (!qinfo.udata) {
+		rc = -ENOMEM;
+		goto out;
+	}
 	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
-	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
-			IPA_CMD_ASS_ARP_QUERY_INFO,
-			sizeof(int), QETH_PROT_IPV4);
-
-	rc = qeth_l3_send_ipa_arp_cmd(card, iob,
-			   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
-			   qeth_l3_arp_query_cb, (void *)&qinfo);
+	rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
 	if (rc) {
-		tmp = rc;
-		QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s "
-			"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
-			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
 		if (copy_to_user(udata, qinfo.udata, 4))
 			rc = -EFAULT;
+		goto free_and_out;
 	} else {
-		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
+#ifdef CONFIG_QETH_IPV6
+		if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
+			/* fails in case of GuestLAN QDIO mode */
+			qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6,
+				&qinfo);
+		}
+#endif
+		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
+			QETH_CARD_TEXT(card, 4, "qactf");
 			rc = -EFAULT;
+			goto free_and_out;
+		}
+		QETH_CARD_TEXT_(card, 4, "qacts");
 	}
+free_and_out:
 	kfree(qinfo.udata);
+out:
 	return rc;
 }
 
@@ -2859,7 +2859,9 @@ static inline void qeth_l3_hdr_csum(struct qeth_card *card,
 	 */
 	if (iph->protocol == IPPROTO_UDP)
 		hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
-	hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+	hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
+		QETH_HDR_EXT_CSUM_HDR_REQ;
+	iph->check = 0;
 	if (card->options.performance_stats)
 		card->perf_stats.tx_csum++;
 }
@@ -2874,6 +2876,7 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 
 	/*fix header to TSO values ...*/
 	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
 	/*set values which are fix for the first approach ...*/
 	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
 	hdr->ext.imb_hdr_no = 1;
@@ -2923,7 +2926,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct qeth_qdio_out_q *queue = card->qdio.out_qs
 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
 	int tx_bytes = skb->len;
-	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
+	bool large_send;
 	int data_offset = -1;
 	int nr_frags;
 
@@ -2945,8 +2948,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		card->perf_stats.outbound_start_time = qeth_get_micros();
 	}
 
-	if (skb_is_gso(skb))
-		large_send = card->options.large_send;
+	large_send = skb_is_gso(skb);
 
 	if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
 	    (skb_shinfo(skb)->nr_frags == 0)) {
@@ -2975,7 +2977,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_pull(new_skb, ETH_HLEN);
 	}
 
-	if (ipv == 6 && card->vlangrp &&
+	if (ipv != 4 && card->vlangrp &&
 	    vlan_tx_tag_present(new_skb)) {
 		skb_push(new_skb, VLAN_HLEN);
 		skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
@@ -2995,7 +2997,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* fix hardware limitation: as long as we do not have sbal
 	 * chaining we can not send long frag lists
 	 */
-	if (large_send == QETH_LARGE_SEND_TSO) {
+	if (large_send) {
 		if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
 			if (skb_linearize(new_skb))
 				goto tx_drop;
@@ -3004,8 +3006,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	if ((large_send == QETH_LARGE_SEND_TSO) &&
-	    (cast_type == RTN_UNSPEC)) {
+	if (large_send && (cast_type == RTN_UNSPEC)) {
 		hdr = (struct qeth_hdr *)skb_push(new_skb,
 						sizeof(struct qeth_hdr_tso));
 		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
@@ -3040,7 +3041,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (card->info.type != QETH_CARD_TYPE_IQD) {
 		int len;
-		if (large_send == QETH_LARGE_SEND_TSO)
+		if (large_send)
 			len = ((unsigned long)tcp_hdr(new_skb) +
 				tcp_hdr(new_skb)->doff * 4) -
 				(unsigned long)new_skb->data;
@@ -3061,7 +3062,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (new_skb != skb)
 		dev_kfree_skb_any(skb);
 	if (card->options.performance_stats) {
-		if (large_send != QETH_LARGE_SEND_NO) {
+		if (large_send) {
 			card->perf_stats.large_send_bytes += tx_bytes;
 			card->perf_stats.large_send_cnt++;
 		}
@@ -3100,92 +3101,89 @@ tx_drop:
3100 return NETDEV_TX_OK; 3101 return NETDEV_TX_OK;
3101} 3102}
3102 3103
3103static int qeth_l3_open(struct net_device *dev) 3104static int __qeth_l3_open(struct net_device *dev)
3104{ 3105{
3105 struct qeth_card *card = dev->ml_priv; 3106 struct qeth_card *card = dev->ml_priv;
3107 int rc = 0;
3106 3108
3107 QETH_CARD_TEXT(card, 4, "qethopen"); 3109 QETH_CARD_TEXT(card, 4, "qethopen");
3110 if (card->state == CARD_STATE_UP)
3111 return rc;
3108 if (card->state != CARD_STATE_SOFTSETUP) 3112 if (card->state != CARD_STATE_SOFTSETUP)
3109 return -ENODEV; 3113 return -ENODEV;
3110 card->data.state = CH_STATE_UP; 3114 card->data.state = CH_STATE_UP;
3111 card->state = CARD_STATE_UP; 3115 card->state = CARD_STATE_UP;
3112 netif_start_queue(dev); 3116 netif_start_queue(dev);
3113 3117
3114 if (!card->lan_online && netif_carrier_ok(dev)) 3118 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
3115 netif_carrier_off(dev); 3119 napi_enable(&card->napi);
3116 return 0; 3120 napi_schedule(&card->napi);
3121 } else
3122 rc = -EIO;
3123 return rc;
3117} 3124}
3118 3125
3119static int qeth_l3_stop(struct net_device *dev) 3126static int qeth_l3_open(struct net_device *dev)
3120{ 3127{
3121 struct qeth_card *card = dev->ml_priv; 3128 struct qeth_card *card = dev->ml_priv;
3122 3129
3123 QETH_CARD_TEXT(card, 4, "qethstop"); 3130 QETH_CARD_TEXT(card, 5, "qethope_");
3124 netif_tx_disable(dev); 3131 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
3125 if (card->state == CARD_STATE_UP) 3132 QETH_CARD_TEXT(card, 3, "openREC");
3126 card->state = CARD_STATE_SOFTSETUP; 3133 return -ERESTARTSYS;
3127 return 0; 3134 }
3135 return __qeth_l3_open(dev);
3128} 3136}
3129 3137
3130static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev) 3138static int qeth_l3_stop(struct net_device *dev)
3131{ 3139{
3132 struct qeth_card *card = dev->ml_priv; 3140 struct qeth_card *card = dev->ml_priv;
3133 3141
3134 return (card->options.checksum_type == HW_CHECKSUMMING); 3142 QETH_CARD_TEXT(card, 4, "qethstop");
3143 netif_tx_disable(dev);
3144 if (card->state == CARD_STATE_UP) {
3145 card->state = CARD_STATE_SOFTSETUP;
3146 napi_disable(&card->napi);
3147 }
3148 return 0;
3135} 3149}
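
The reworked open path above hands receive processing to NAPI: device interrupts are stopped first, and the poll routine is scheduled once to drain anything already queued. A compilable sketch of that hand-off, with stub functions standing in for qdio_stop_irq() and the NAPI primitives:

#include <errno.h>

static int stub_stop_device_irq(void)	{ return 0; }	/* qdio_stop_irq() */
static void stub_napi_enable(void)	{ }		/* napi_enable() */
static void stub_napi_schedule(void)	{ }		/* napi_schedule() */

static int model_open(void)
{
	if (stub_stop_device_irq() < 0)
		return -EIO;		/* could not quiesce device IRQs */
	stub_napi_enable();		/* RX is poll-driven from here on */
	stub_napi_schedule();		/* drain buffers queued before open */
	return 0;
}
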
3136 3150
3137static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) 3151static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
3138{ 3152{
3139 struct qeth_card *card = dev->ml_priv; 3153 struct qeth_card *card = dev->ml_priv;
3140 enum qeth_checksum_types csum_type;
3141 3154
3142 if (data) 3155 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
3143 csum_type = HW_CHECKSUMMING; 3156 features &= ~NETIF_F_IP_CSUM;
3144 else 3157 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
3145 csum_type = SW_CHECKSUMMING; 3158 features &= ~NETIF_F_TSO;
3159 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
3160 features &= ~NETIF_F_RXCSUM;
3146 3161
3147 return qeth_l3_set_rx_csum(card, csum_type); 3162 return features;
3148} 3163}
3149 3164
3150static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 3165static int qeth_l3_set_features(struct net_device *dev, u32 features)
3151{ 3166{
3152 struct qeth_card *card = dev->ml_priv; 3167 struct qeth_card *card = dev->ml_priv;
3153 int rc = 0; 3168 u32 changed = dev->features ^ features;
3169 int err;
3154 3170
3155 if (data) { 3171 if (!(changed & NETIF_F_RXCSUM))
3156 rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO); 3172 return 0;
3157 } else {
3158 dev->features &= ~NETIF_F_TSO;
3159 card->options.large_send = QETH_LARGE_SEND_NO;
3160 }
3161 return rc;
3162}
3163 3173
3164static int qeth_l3_ethtool_set_tx_csum(struct net_device *dev, u32 data) 3174 if (card->state == CARD_STATE_DOWN ||
3165{ 3175 card->state == CARD_STATE_RECOVER)
3166 struct qeth_card *card = dev->ml_priv; 3176 return 0;
3167 3177
3168 if (data) { 3178 err = qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM);
3169 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 3179 if (err)
3170 dev->features |= NETIF_F_IP_CSUM; 3180 dev->features = features ^ NETIF_F_RXCSUM;
3171 else
3172 return -EPERM;
3173 } else
3174 dev->features &= ~NETIF_F_IP_CSUM;
3175 3181
3176 return 0; 3182 return err;
3177} 3183}
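
qeth_l3_fix_features() and qeth_l3_set_features() implement the then-new netdev features contract that replaces the per-driver ethtool callbacks removed below: fix_features masks out what the hardware cannot do, and set_features programs only the bits that actually changed, rolling a bit back on failure. A self-contained model of that negotiation; the flag values and struct are illustrative stand-ins for netdev_features_t:

#include <stdint.h>

#define F_IP_CSUM	(1u << 0)
#define F_TSO		(1u << 1)
#define F_RXCSUM	(1u << 2)

struct model_card {
	uint32_t hw_caps;	/* what the adapter supports */
	uint32_t features;	/* what is currently enabled */
};

static uint32_t model_fix_features(struct model_card *c, uint32_t wanted)
{
	return wanted & c->hw_caps;	/* silently drop unsupported bits */
}

static int model_set_features(struct model_card *c, uint32_t features)
{
	uint32_t changed = c->features ^ features;
	int err = 0;

	if (!(changed & F_RXCSUM))	/* nothing to reprogram on the card */
		return 0;
	/* ...program RX checksum offload here, setting err on failure... */
	c->features = err ? (features ^ F_RXCSUM) : features;
	return err;
}
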
3178 3184
3179static const struct ethtool_ops qeth_l3_ethtool_ops = { 3185static const struct ethtool_ops qeth_l3_ethtool_ops = {
3180 .get_link = ethtool_op_get_link, 3186 .get_link = ethtool_op_get_link,
3181 .get_tx_csum = ethtool_op_get_tx_csum,
3182 .set_tx_csum = qeth_l3_ethtool_set_tx_csum,
3183 .get_rx_csum = qeth_l3_ethtool_get_rx_csum,
3184 .set_rx_csum = qeth_l3_ethtool_set_rx_csum,
3185 .get_sg = ethtool_op_get_sg,
3186 .set_sg = ethtool_op_set_sg,
3187 .get_tso = ethtool_op_get_tso,
3188 .set_tso = qeth_l3_ethtool_set_tso,
3189 .get_strings = qeth_core_get_strings, 3187 .get_strings = qeth_core_get_strings,
3190 .get_ethtool_stats = qeth_core_get_ethtool_stats, 3188 .get_ethtool_stats = qeth_core_get_ethtool_stats,
3191 .get_sset_count = qeth_core_get_sset_count, 3189 .get_sset_count = qeth_core_get_sset_count,
@@ -3226,6 +3224,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
3226 .ndo_set_multicast_list = qeth_l3_set_multicast_list, 3224 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
3227 .ndo_do_ioctl = qeth_l3_do_ioctl, 3225 .ndo_do_ioctl = qeth_l3_do_ioctl,
3228 .ndo_change_mtu = qeth_change_mtu, 3226 .ndo_change_mtu = qeth_change_mtu,
3227 .ndo_fix_features = qeth_l3_fix_features,
3228 .ndo_set_features = qeth_l3_set_features,
3229 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, 3229 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
3230 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3230 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
3231 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3231 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
@@ -3241,6 +3241,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
3241 .ndo_set_multicast_list = qeth_l3_set_multicast_list, 3241 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
3242 .ndo_do_ioctl = qeth_l3_do_ioctl, 3242 .ndo_do_ioctl = qeth_l3_do_ioctl,
3243 .ndo_change_mtu = qeth_change_mtu, 3243 .ndo_change_mtu = qeth_change_mtu,
3244 .ndo_fix_features = qeth_l3_fix_features,
3245 .ndo_set_features = qeth_l3_set_features,
3244 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, 3246 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
3245 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3247 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
3246 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3248 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
@@ -3271,6 +3273,12 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3271 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 3273 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
3272 card->dev->dev_id = card->info.unique_id & 3274 card->dev->dev_id = card->info.unique_id &
3273 0xffff; 3275 0xffff;
3276 if (!card->info.guestlan) {
3277 card->dev->hw_features = NETIF_F_SG |
3278 NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
3279 NETIF_F_TSO;
3280 card->dev->features = NETIF_F_RXCSUM;
3281 }
3274 } 3282 }
3275 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 3283 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
3276 card->dev = alloc_netdev(0, "hsi%d", ether_setup); 3284 card->dev = alloc_netdev(0, "hsi%d", ether_setup);
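
The hw_features/features split set up in the hunk above determines what ethtool may later toggle through the new callbacks: hw_features advertises the toggleable offloads, features is the default-on subset (only RX checksumming), and guest LAN devices advertise nothing. A self-contained sketch of that split, with illustrative flag bits:

#include <stdint.h>

#define F_SG		(1u << 0)
#define F_IP_CSUM	(1u << 1)
#define F_TSO		(1u << 2)
#define F_RXCSUM	(1u << 3)

struct model_netdev {
	uint32_t hw_features;	/* user-toggleable via ethtool */
	uint32_t features;	/* enabled at probe time */
};

static void model_setup_offloads(struct model_netdev *dev, int guestlan)
{
	if (guestlan)
		return;		/* virtual NICs: no offloads advertised */
	dev->hw_features = F_SG | F_RXCSUM | F_IP_CSUM | F_TSO;
	dev->features = F_RXCSUM;	/* only RX csum on by default */
}
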
@@ -3293,57 +3301,20 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3293 card->dev->gso_max_size = 15 * PAGE_SIZE; 3301 card->dev->gso_max_size = 15 * PAGE_SIZE;
3294 3302
3295 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3303 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3304 netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
3296 return register_netdev(card->dev); 3305 return register_netdev(card->dev);
3297} 3306}
3298 3307
3299static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
3300 unsigned int qdio_err, unsigned int queue, int first_element,
3301 int count, unsigned long card_ptr)
3302{
3303 struct net_device *net_dev;
3304 struct qeth_card *card;
3305 struct qeth_qdio_buffer *buffer;
3306 int index;
3307 int i;
3308
3309 card = (struct qeth_card *) card_ptr;
3310 net_dev = card->dev;
3311 if (card->options.performance_stats) {
3312 card->perf_stats.inbound_cnt++;
3313 card->perf_stats.inbound_start_time = qeth_get_micros();
3314 }
3315 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
3316 QETH_CARD_TEXT(card, 1, "qdinchk");
3317 QETH_CARD_TEXT_(card, 1, "%04X%04X",
3318 first_element, count);
3319 QETH_CARD_TEXT_(card, 1, "%04X", queue);
3320 qeth_schedule_recovery(card);
3321 return;
3322 }
3323 for (i = first_element; i < (first_element + count); ++i) {
3324 index = i % QDIO_MAX_BUFFERS_PER_Q;
3325 buffer = &card->qdio.in_q->bufs[index];
3326 if (!(qdio_err &&
3327 qeth_check_qdio_errors(card, buffer->buffer,
3328 qdio_err, "qinerr")))
3329 qeth_l3_process_inbound_buffer(card, buffer, index);
3330 /* clear buffer and give back to hardware */
3331 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
3332 qeth_queue_input_buffer(card, index);
3333 }
3334 if (card->options.performance_stats)
3335 card->perf_stats.inbound_time += qeth_get_micros() -
3336 card->perf_stats.inbound_start_time;
3337}
3338
3339static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3308static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3340{ 3309{
3341 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3310 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3342 3311
3343 qeth_l3_create_device_attributes(&gdev->dev); 3312 qeth_l3_create_device_attributes(&gdev->dev);
3344 card->options.layer2 = 0; 3313 card->options.layer2 = 0;
3314 card->info.hwtrap = 0;
3315 card->discipline.start_poll = qeth_qdio_start_poll;
3345 card->discipline.input_handler = (qdio_handler_t *) 3316 card->discipline.input_handler = (qdio_handler_t *)
3346 qeth_l3_qdio_input_handler; 3317 qeth_qdio_input_handler;
3347 card->discipline.output_handler = (qdio_handler_t *) 3318 card->discipline.output_handler = (qdio_handler_t *)
3348 qeth_qdio_output_handler; 3319 qeth_qdio_output_handler;
3349 card->discipline.recover = qeth_l3_recover; 3320 card->discipline.recover = qeth_l3_recover;
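
With the layer-3 copy of the qdio input handler deleted above, receive processing moves to the shared qeth_qdio_input_handler plus a start_poll hook, i.e. the standard NAPI budget loop. A compilable model of such a poll routine, with stubs for the queue and interrupt primitives:

static int stub_rx_pop(void)		{ return 0; }	/* 0: queue empty */
static void stub_napi_complete(void)	{ }
static void stub_enable_device_irq(void) { }

static int model_poll(int budget)
{
	int done = 0;

	while (done < budget && stub_rx_pop())
		done++;			/* hand one buffer to the stack */

	if (done < budget) {		/* queue drained within budget */
		stub_napi_complete();
		stub_enable_device_irq();	/* back to IRQ-driven RX */
	}
	return done;			/* NAPI contract: work performed */
}
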
@@ -3359,17 +3330,15 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3359 qeth_set_allowed_threads(card, 0, 1); 3330 qeth_set_allowed_threads(card, 0, 1);
3360 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 3331 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
3361 3332
3362 if (cgdev->state == CCWGROUP_ONLINE) { 3333 if (cgdev->state == CCWGROUP_ONLINE)
3363 card->use_hard_stop = 1;
3364 qeth_l3_set_offline(cgdev); 3334 qeth_l3_set_offline(cgdev);
3365 }
3366 3335
3367 if (card->dev) { 3336 if (card->dev) {
3368 unregister_netdev(card->dev); 3337 unregister_netdev(card->dev);
3369 card->dev = NULL; 3338 card->dev = NULL;
3370 } 3339 }
3371 3340
3372 qeth_l3_clear_ip_list(card, 0, 0); 3341 qeth_l3_clear_ip_list(card, 0);
3373 qeth_l3_clear_ipato_list(card); 3342 qeth_l3_clear_ipato_list(card);
3374 return; 3343 return;
3375} 3344}
@@ -3394,14 +3363,20 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3394 goto out_remove; 3363 goto out_remove;
3395 } 3364 }
3396 3365
3397 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3398
3399 if (!card->dev && qeth_l3_setup_netdev(card)) { 3366 if (!card->dev && qeth_l3_setup_netdev(card)) {
3400 rc = -ENODEV; 3367 rc = -ENODEV;
3401 goto out_remove; 3368 goto out_remove;
3402 } 3369 }
3403 3370
3371 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
3372 if (card->info.hwtrap &&
3373 qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
3374 card->info.hwtrap = 0;
3375 } else
3376 card->info.hwtrap = 0;
3377
3404 card->state = CARD_STATE_HARDSETUP; 3378 card->state = CARD_STATE_HARDSETUP;
3379 memset(&card->rx, 0, sizeof(struct qeth_rx));
3405 qeth_print_status_message(card); 3380 qeth_print_status_message(card);
3406 3381
3407 /* softsetup */ 3382 /* softsetup */
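
The new hwtrap bookkeeping arms the hardware-trap facility during online setup when the diagnose-trap command is supported, and clears the flag when arming fails or the facility is absent. Reading this hunk together with the offline and shutdown hunks further down suggests the encoding 0 = disarmed or unsupported, 1 = armed, 2 = disarm even during recovery; that reading is an inference from the hunks, not stated here. A sketch of the online-time decision under that assumption:

/* inferred encoding: 0 disarmed/unsupported, 1 armed, 2 forced disarm */
static int model_hwtrap_on_online(int trap_supported, int hwtrap,
				  int arm_failed)
{
	if (!trap_supported)
		return 0;	/* facility absent: stay disarmed */
	if (hwtrap && arm_failed)
		return 0;	/* record that arming did not work */
	return hwtrap;		/* otherwise keep the requested state */
}
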
@@ -3414,13 +3389,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3414 dev_warn(&card->gdev->dev, 3389 dev_warn(&card->gdev->dev,
3415 "The LAN is offline\n"); 3390 "The LAN is offline\n");
3416 card->lan_online = 0; 3391 card->lan_online = 0;
3417 goto out; 3392 goto contin;
3418 } 3393 }
3419 rc = -ENODEV; 3394 rc = -ENODEV;
3420 goto out_remove; 3395 goto out_remove;
3421 } else 3396 } else
3422 card->lan_online = 1; 3397 card->lan_online = 1;
3423 3398
3399contin:
3424 rc = qeth_l3_setadapter_parms(card); 3400 rc = qeth_l3_setadapter_parms(card);
3425 if (rc) 3401 if (rc)
3426 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3402 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
@@ -3428,7 +3404,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3428 rc = qeth_l3_start_ipassists(card); 3404 rc = qeth_l3_start_ipassists(card);
3429 if (rc) 3405 if (rc)
3430 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 3406 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3431 qeth_l3_set_large_send(card, card->options.large_send);
3432 rc = qeth_l3_setrouting_v4(card); 3407 rc = qeth_l3_setrouting_v4(card);
3433 if (rc) 3408 if (rc)
3434 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 3409 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
@@ -3445,13 +3420,16 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3445 goto out_remove; 3420 goto out_remove;
3446 } 3421 }
3447 card->state = CARD_STATE_SOFTSETUP; 3422 card->state = CARD_STATE_SOFTSETUP;
3448 netif_carrier_on(card->dev);
3449 3423
3450 qeth_set_allowed_threads(card, 0xffffffff, 0); 3424 qeth_set_allowed_threads(card, 0xffffffff, 0);
3451 qeth_l3_set_ip_addr_list(card); 3425 qeth_l3_set_ip_addr_list(card);
3426 if (card->lan_online)
3427 netif_carrier_on(card->dev);
3428 else
3429 netif_carrier_off(card->dev);
3452 if (recover_flag == CARD_STATE_RECOVER) { 3430 if (recover_flag == CARD_STATE_RECOVER) {
3453 if (recovery_mode) 3431 if (recovery_mode)
3454 qeth_l3_open(card->dev); 3432 __qeth_l3_open(card->dev);
3455 else { 3433 else {
3456 rtnl_lock(); 3434 rtnl_lock();
3457 dev_open(card->dev); 3435 dev_open(card->dev);
@@ -3461,12 +3439,10 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3461 } 3439 }
3462 /* let user_space know that device is online */ 3440 /* let user_space know that device is online */
3463 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3441 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
3464out:
3465 mutex_unlock(&card->conf_mutex); 3442 mutex_unlock(&card->conf_mutex);
3466 mutex_unlock(&card->discipline_mutex); 3443 mutex_unlock(&card->discipline_mutex);
3467 return 0; 3444 return 0;
3468out_remove: 3445out_remove:
3469 card->use_hard_stop = 1;
3470 qeth_l3_stop_card(card, 0); 3446 qeth_l3_stop_card(card, 0);
3471 ccw_device_set_offline(CARD_DDEV(card)); 3447 ccw_device_set_offline(CARD_DDEV(card));
3472 ccw_device_set_offline(CARD_WDEV(card)); 3448 ccw_device_set_offline(CARD_WDEV(card));
@@ -3500,6 +3476,10 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
3500 if (card->dev && netif_carrier_ok(card->dev)) 3476 if (card->dev && netif_carrier_ok(card->dev))
3501 netif_carrier_off(card->dev); 3477 netif_carrier_off(card->dev);
3502 recover_flag = card->state; 3478 recover_flag = card->state;
3479 if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
3480 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
3481 card->info.hwtrap = 1;
3482 }
3503 qeth_l3_stop_card(card, recovery_mode); 3483 qeth_l3_stop_card(card, recovery_mode);
3504 rc = ccw_device_set_offline(CARD_DDEV(card)); 3484 rc = ccw_device_set_offline(CARD_DDEV(card));
3505 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 3485 rc2 = ccw_device_set_offline(CARD_WDEV(card));
@@ -3535,12 +3515,8 @@ static int qeth_l3_recover(void *ptr)
3535 QETH_CARD_TEXT(card, 2, "recover2"); 3515 QETH_CARD_TEXT(card, 2, "recover2");
3536 dev_warn(&card->gdev->dev, 3516 dev_warn(&card->gdev->dev,
3537 "A recovery process has been started for the device\n"); 3517 "A recovery process has been started for the device\n");
3538 card->use_hard_stop = 1;
3539 __qeth_l3_set_offline(card->gdev, 1); 3518 __qeth_l3_set_offline(card->gdev, 1);
3540 rc = __qeth_l3_set_online(card->gdev, 1); 3519 rc = __qeth_l3_set_online(card->gdev, 1);
3541 /* don't run another scheduled recovery */
3542 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3543 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3544 if (!rc) 3520 if (!rc)
3545 dev_info(&card->gdev->dev, 3521 dev_info(&card->gdev->dev,
3546 "Device successfully recovered!\n"); 3522 "Device successfully recovered!\n");
@@ -3551,13 +3527,16 @@ static int qeth_l3_recover(void *ptr)
3551 dev_warn(&card->gdev->dev, "The qeth device driver " 3527 dev_warn(&card->gdev->dev, "The qeth device driver "
3552 "failed to recover an error on the device\n"); 3528 "failed to recover an error on the device\n");
3553 } 3529 }
3530 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3531 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3554 return 0; 3532 return 0;
3555} 3533}
3556 3534
3557static void qeth_l3_shutdown(struct ccwgroup_device *gdev) 3535static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
3558{ 3536{
3559 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3537 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3560 qeth_l3_clear_ip_list(card, 0, 0); 3538 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
3539 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
3561 qeth_qdio_clear_card(card, 0); 3540 qeth_qdio_clear_card(card, 0);
3562 qeth_clear_qdio_buffers(card); 3541 qeth_clear_qdio_buffers(card);
3563} 3542}
@@ -3573,7 +3552,8 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
3573 if (gdev->state == CCWGROUP_OFFLINE) 3552 if (gdev->state == CCWGROUP_OFFLINE)
3574 return 0; 3553 return 0;
3575 if (card->state == CARD_STATE_UP) { 3554 if (card->state == CARD_STATE_UP) {
3576 card->use_hard_stop = 1; 3555 if (card->info.hwtrap)
3556 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
3577 __qeth_l3_set_offline(card->gdev, 1); 3557 __qeth_l3_set_offline(card->gdev, 1);
3578 } else 3558 } else
3579 __qeth_l3_set_offline(card->gdev, 0); 3559 __qeth_l3_set_offline(card->gdev, 0);
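
Offline, shutdown, and suspend now share one rule: an armed hardware trap is disarmed before the device leaves the online state, except during recovery, where only the forced state still disarms, so a plain recovery keeps the trap armed across the restart. A one-function restatement of the predicate used in __qeth_l3_set_offline above (hypothetical name, encoding inferred as before):

static int model_should_disarm(int recovery_mode, int hwtrap)
{
	return (!recovery_mode && hwtrap) || hwtrap == 2;
}
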
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 67cfa68dcf1b..cd99210296e2 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -15,16 +15,6 @@
15#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ 15#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
16struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) 16struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
17 17
18static const char *qeth_l3_get_checksum_str(struct qeth_card *card)
19{
20 if (card->options.checksum_type == SW_CHECKSUMMING)
21 return "sw";
22 else if (card->options.checksum_type == HW_CHECKSUMMING)
23 return "hw";
24 else
25 return "no";
26}
27
28static ssize_t qeth_l3_dev_route_show(struct qeth_card *card, 18static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
29 struct qeth_routing_info *route, char *buf) 19 struct qeth_routing_info *route, char *buf)
30{ 20{
@@ -295,51 +285,6 @@ out:
295static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show, 285static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
296 qeth_l3_dev_canonical_macaddr_store); 286 qeth_l3_dev_canonical_macaddr_store);
297 287
298static ssize_t qeth_l3_dev_checksum_show(struct device *dev,
299 struct device_attribute *attr, char *buf)
300{
301 struct qeth_card *card = dev_get_drvdata(dev);
302
303 if (!card)
304 return -EINVAL;
305
306 return sprintf(buf, "%s checksumming\n",
307 qeth_l3_get_checksum_str(card));
308}
309
310static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
311 struct device_attribute *attr, const char *buf, size_t count)
312{
313 struct qeth_card *card = dev_get_drvdata(dev);
314 enum qeth_checksum_types csum_type;
315 char *tmp;
316 int rc = 0;
317
318 if (!card)
319 return -EINVAL;
320
321 mutex_lock(&card->conf_mutex);
322 tmp = strsep((char **) &buf, "\n");
323 if (!strcmp(tmp, "sw_checksumming"))
324 csum_type = SW_CHECKSUMMING;
325 else if (!strcmp(tmp, "hw_checksumming"))
326 csum_type = HW_CHECKSUMMING;
327 else if (!strcmp(tmp, "no_checksumming"))
328 csum_type = NO_CHECKSUMMING;
329 else {
330 rc = -EINVAL;
331 goto out;
332 }
333
334 rc = qeth_l3_set_rx_csum(card, csum_type);
335out:
336 mutex_unlock(&card->conf_mutex);
337 return rc ? rc : count;
338}
339
340static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
341 qeth_l3_dev_checksum_store);
342
343static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, 288static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
344 struct device_attribute *attr, char *buf) 289 struct device_attribute *attr, char *buf)
345{ 290{
@@ -402,61 +347,13 @@ out:
402static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show, 347static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
403 qeth_l3_dev_sniffer_store); 348 qeth_l3_dev_sniffer_store);
404 349
405static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
406 struct device_attribute *attr, char *buf)
407{
408 struct qeth_card *card = dev_get_drvdata(dev);
409
410 if (!card)
411 return -EINVAL;
412
413 switch (card->options.large_send) {
414 case QETH_LARGE_SEND_NO:
415 return sprintf(buf, "%s\n", "no");
416 case QETH_LARGE_SEND_TSO:
417 return sprintf(buf, "%s\n", "TSO");
418 default:
419 return sprintf(buf, "%s\n", "N/A");
420 }
421}
422
423static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
424 struct device_attribute *attr, const char *buf, size_t count)
425{
426 struct qeth_card *card = dev_get_drvdata(dev);
427 enum qeth_large_send_types type;
428 int rc = 0;
429 char *tmp;
430
431 if (!card)
432 return -EINVAL;
433 tmp = strsep((char **) &buf, "\n");
434 if (!strcmp(tmp, "no"))
435 type = QETH_LARGE_SEND_NO;
436 else if (!strcmp(tmp, "TSO"))
437 type = QETH_LARGE_SEND_TSO;
438 else
439 return -EINVAL;
440
441 mutex_lock(&card->conf_mutex);
442 if (card->options.large_send != type)
443 rc = qeth_l3_set_large_send(card, type);
444 mutex_unlock(&card->conf_mutex);
445 return rc ? rc : count;
446}
447
448static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
449 qeth_l3_dev_large_send_store);
450
451static struct attribute *qeth_l3_device_attrs[] = { 350static struct attribute *qeth_l3_device_attrs[] = {
452 &dev_attr_route4.attr, 351 &dev_attr_route4.attr,
453 &dev_attr_route6.attr, 352 &dev_attr_route6.attr,
454 &dev_attr_fake_broadcast.attr, 353 &dev_attr_fake_broadcast.attr,
455 &dev_attr_broadcast_mode.attr, 354 &dev_attr_broadcast_mode.attr,
456 &dev_attr_canonical_macaddr.attr, 355 &dev_attr_canonical_macaddr.attr,
457 &dev_attr_checksumming.attr,
458 &dev_attr_sniffer.attr, 356 &dev_attr_sniffer.attr,
459 &dev_attr_large_send.attr,
460 NULL, 357 NULL,
461}; 358};
462 359
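
With offloads handled through hw_features and the ndo_fix_features/ndo_set_features callbacks above, the driver-private checksumming and large_send sysfs attributes become redundant and are removed here; the equivalent runtime control is the generic ethtool offload interface (ethtool -K), which reaches the driver through qeth_l3_set_features().
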
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 65e1cf104943..207b7d742443 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -60,7 +60,7 @@ static struct iucv_handler smsg_handler = {
60static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8], 60static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
61 u8 ipuser[16]) 61 u8 ipuser[16])
62{ 62{
63 if (strncmp(ipvmid, "*MSG    ", sizeof(ipvmid)) != 0) 63 if (strncmp(ipvmid, "*MSG    ", 8) != 0)
64 return -EINVAL; 64 return -EINVAL;
65 /* Path pending from *MSG. */ 65 /* Path pending from *MSG. */
66 return iucv_path_accept(path, &smsg_handler, "SMSGIUCV        ", NULL); 66 return iucv_path_accept(path, &smsg_handler, "SMSGIUCV        ", NULL);
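
The smsgiucv change deserves a note: ipvmid is declared as u8 ipvmid[8], but an array parameter decays to a pointer, so sizeof(ipvmid) inside the function yields the pointer size, 4 on 31-bit s390 (and only coincidentally 8 on 64-bit), never the declared length; the patch makes the comparison length explicit. A self-contained demonstration of the decay:

#include <stdio.h>

static void takes_array(unsigned char ipvmid[8])
{
	/* array parameter == pointer: sizeof yields the pointer size,
	 * never the declared array length */
	printf("sizeof in callee: %zu\n", sizeof(ipvmid));
}

int main(void)
{
	unsigned char ipvmid[8] = { '*', 'M', 'S', 'G', ' ', ' ', ' ', ' ' };

	printf("sizeof at caller: %zu\n", sizeof(ipvmid));	/* 8 */
	takes_array(ipvmid);					/* 4 or 8 */
	return 0;
}
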
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index cb301cc6178c..c454ffebb63e 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -2,7 +2,8 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ 5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \
6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o 6 zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
7 zfcp_unit.o
7 8
8obj-$(CONFIG_ZFCP) += zfcp.o 9obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 96fa1f536394..645b0fcbb370 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -45,8 +45,8 @@ static char *init_device;
45module_param_named(device, init_device, charp, 0400); 45module_param_named(device, init_device, charp, 0400);
46MODULE_PARM_DESC(device, "specify initial device"); 46MODULE_PARM_DESC(device, "specify initial device");
47 47
48static struct kmem_cache *zfcp_cache_hw_align(const char *name, 48static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
49 unsigned long size) 49 unsigned long size)
50{ 50{
51 return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); 51 return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
52} 52}
@@ -56,7 +56,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
56 struct ccw_device *cdev; 56 struct ccw_device *cdev;
57 struct zfcp_adapter *adapter; 57 struct zfcp_adapter *adapter;
58 struct zfcp_port *port; 58 struct zfcp_port *port;
59 struct zfcp_unit *unit;
60 59
61 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); 60 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
62 if (!cdev) 61 if (!cdev)
@@ -72,17 +71,11 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
72 port = zfcp_get_port_by_wwpn(adapter, wwpn); 71 port = zfcp_get_port_by_wwpn(adapter, wwpn);
73 if (!port) 72 if (!port)
74 goto out_port; 73 goto out_port;
74 flush_work(&port->rport_work);
75 75
76 unit = zfcp_unit_enqueue(port, lun); 76 zfcp_unit_add(port, lun);
77 if (IS_ERR(unit))
78 goto out_unit;
79
80 zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
81 zfcp_erp_wait(adapter);
82 flush_work(&unit->scsi_work);
83
84out_unit:
85 put_device(&port->dev); 77 put_device(&port->dev);
78
86out_port: 79out_port:
87 zfcp_ccw_adapter_put(adapter); 80 zfcp_ccw_adapter_put(adapter);
88out_ccw_device: 81out_ccw_device:
@@ -129,35 +122,23 @@ static int __init zfcp_module_init(void)
129{ 122{
130 int retval = -ENOMEM; 123 int retval = -ENOMEM;
131 124
132 zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", 125 zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
133 sizeof(struct zfcp_fc_gpn_ft_req)); 126 sizeof(struct fsf_qtcb));
134 if (!zfcp_data.gpn_ft_cache) 127 if (!zfcp_fsf_qtcb_cache)
135 goto out;
136
137 zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
138 sizeof(struct fsf_qtcb));
139 if (!zfcp_data.qtcb_cache)
140 goto out_qtcb_cache; 128 goto out_qtcb_cache;
141 129
142 zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr", 130 zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
143 sizeof(struct fsf_status_read_buffer)); 131 sizeof(struct zfcp_fc_req));
144 if (!zfcp_data.sr_buffer_cache) 132 if (!zfcp_fc_req_cache)
145 goto out_sr_cache; 133 goto out_fc_cache;
146 134
147 zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", 135 zfcp_scsi_transport_template =
148 sizeof(struct zfcp_fc_gid_pn));
149 if (!zfcp_data.gid_pn_cache)
150 goto out_gid_cache;
151
152 zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
153 sizeof(struct zfcp_fc_els_adisc));
154 if (!zfcp_data.adisc_cache)
155 goto out_adisc_cache;
156
157 zfcp_data.scsi_transport_template =
158 fc_attach_transport(&zfcp_transport_functions); 136 fc_attach_transport(&zfcp_transport_functions);
159 if (!zfcp_data.scsi_transport_template) 137 if (!zfcp_scsi_transport_template)
160 goto out_transport; 138 goto out_transport;
139 scsi_transport_reserve_device(zfcp_scsi_transport_template,
140 sizeof(struct zfcp_scsi_dev));
141
161 142
162 retval = misc_register(&zfcp_cfdc_misc); 143 retval = misc_register(&zfcp_cfdc_misc);
163 if (retval) { 144 if (retval) {
@@ -179,18 +160,12 @@ static int __init zfcp_module_init(void)
179out_ccw_register: 160out_ccw_register:
180 misc_deregister(&zfcp_cfdc_misc); 161 misc_deregister(&zfcp_cfdc_misc);
181out_misc: 162out_misc:
182 fc_release_transport(zfcp_data.scsi_transport_template); 163 fc_release_transport(zfcp_scsi_transport_template);
183out_transport: 164out_transport:
184 kmem_cache_destroy(zfcp_data.adisc_cache); 165 kmem_cache_destroy(zfcp_fc_req_cache);
185out_adisc_cache: 166out_fc_cache:
186 kmem_cache_destroy(zfcp_data.gid_pn_cache); 167 kmem_cache_destroy(zfcp_fsf_qtcb_cache);
187out_gid_cache:
188 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
189out_sr_cache:
190 kmem_cache_destroy(zfcp_data.qtcb_cache);
191out_qtcb_cache: 168out_qtcb_cache:
192 kmem_cache_destroy(zfcp_data.gpn_ft_cache);
193out:
194 return retval; 169 return retval;
195} 170}
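
Consolidating five slab caches into two (the QTCB cache and a unified FC request cache) also shortens the error unwinding: each allocation now has exactly one matching cleanup label, unwound in reverse allocation order. A compilable model of that goto-unwind idiom; the stubs here never fail, whereas the real kmem_cache_create() can:

#include <stddef.h>

static void *stub_cache_create(void)	{ static int c; return &c; }
static void stub_cache_destroy(void *c)	{ (void)c; }

static int model_module_init(void)
{
	void *qtcb_cache, *fc_req_cache;

	qtcb_cache = stub_cache_create();
	if (!qtcb_cache)
		return -1;
	fc_req_cache = stub_cache_create();
	if (!fc_req_cache)
		goto out_qtcb;	/* undo in reverse allocation order */
	return 0;

out_qtcb:
	stub_cache_destroy(qtcb_cache);
	return -1;
}
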
196 171
@@ -200,41 +175,14 @@ static void __exit zfcp_module_exit(void)
200{ 175{
201 ccw_driver_unregister(&zfcp_ccw_driver); 176 ccw_driver_unregister(&zfcp_ccw_driver);
202 misc_deregister(&zfcp_cfdc_misc); 177 misc_deregister(&zfcp_cfdc_misc);
203 fc_release_transport(zfcp_data.scsi_transport_template); 178 fc_release_transport(zfcp_scsi_transport_template);
204 kmem_cache_destroy(zfcp_data.adisc_cache); 179 kmem_cache_destroy(zfcp_fc_req_cache);
205 kmem_cache_destroy(zfcp_data.gid_pn_cache); 180 kmem_cache_destroy(zfcp_fsf_qtcb_cache);
206 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
207 kmem_cache_destroy(zfcp_data.qtcb_cache);
208 kmem_cache_destroy(zfcp_data.gpn_ft_cache);
209} 181}
210 182
211module_exit(zfcp_module_exit); 183module_exit(zfcp_module_exit);
212 184
213/** 185/**
214 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
215 * @port: pointer to port to search for unit
216 * @fcp_lun: FCP LUN to search for
217 *
218 * Returns: pointer to zfcp_unit or NULL
219 */
220struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
221{
222 unsigned long flags;
223 struct zfcp_unit *unit;
224
225 read_lock_irqsave(&port->unit_list_lock, flags);
226 list_for_each_entry(unit, &port->unit_list, list)
227 if (unit->fcp_lun == fcp_lun) {
228 if (!get_device(&unit->dev))
229 unit = NULL;
230 read_unlock_irqrestore(&port->unit_list_lock, flags);
231 return unit;
232 }
233 read_unlock_irqrestore(&port->unit_list_lock, flags);
234 return NULL;
235}
236
237/**
238 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn 186 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
239 * @adapter: pointer to adapter to search for port 187 * @adapter: pointer to adapter to search for port
240 * @wwpn: wwpn to search for 188 * @wwpn: wwpn to search for
@@ -259,92 +207,6 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
259 return NULL; 207 return NULL;
260} 208}
261 209
262/**
263 * zfcp_unit_release - dequeue unit
264 * @dev: pointer to device
265 *
266 * waits until all work is done on unit and removes it then from the unit->list
267 * of the associated port.
268 */
269static void zfcp_unit_release(struct device *dev)
270{
271 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
272
273 put_device(&unit->port->dev);
274 kfree(unit);
275}
276
277/**
278 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
279 * @port: pointer to port where unit is added
280 * @fcp_lun: FCP LUN of unit to be enqueued
281 * Returns: pointer to enqueued unit on success, ERR_PTR on error
282 *
283 * Sets up some unit internal structures and creates sysfs entry.
284 */
285struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
286{
287 struct zfcp_unit *unit;
288 int retval = -ENOMEM;
289
290 get_device(&port->dev);
291
292 unit = zfcp_get_unit_by_lun(port, fcp_lun);
293 if (unit) {
294 put_device(&unit->dev);
295 retval = -EEXIST;
296 goto err_out;
297 }
298
299 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
300 if (!unit)
301 goto err_out;
302
303 unit->port = port;
304 unit->fcp_lun = fcp_lun;
305 unit->dev.parent = &port->dev;
306 unit->dev.release = zfcp_unit_release;
307
308 if (dev_set_name(&unit->dev, "0x%016llx",
309 (unsigned long long) fcp_lun)) {
310 kfree(unit);
311 goto err_out;
312 }
313 retval = -EINVAL;
314
315 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
316
317 spin_lock_init(&unit->latencies.lock);
318 unit->latencies.write.channel.min = 0xFFFFFFFF;
319 unit->latencies.write.fabric.min = 0xFFFFFFFF;
320 unit->latencies.read.channel.min = 0xFFFFFFFF;
321 unit->latencies.read.fabric.min = 0xFFFFFFFF;
322 unit->latencies.cmd.channel.min = 0xFFFFFFFF;
323 unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
324
325 if (device_register(&unit->dev)) {
326 put_device(&unit->dev);
327 goto err_out;
328 }
329
330 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
331 goto err_out_put;
332
333 write_lock_irq(&port->unit_list_lock);
334 list_add_tail(&unit->list, &port->unit_list);
335 write_unlock_irq(&port->unit_list_lock);
336
337 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
338
339 return unit;
340
341err_out_put:
342 device_unregister(&unit->dev);
343err_out:
344 put_device(&port->dev);
345 return ERR_PTR(retval);
346}
347
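
The removed lookup, release, and enqueue helpers are not lost functionality: together with the new zfcp_unit.o object in the Makefile hunk above, unit handling moves out of zfcp_aux.c into a dedicated file, and the zfcp_unit_add() call in zfcp_init_device_configure() replaces the old enqueue-plus-reopen sequence. The scsi_transport_reserve_device() call added in module init appears to reserve private space for a struct zfcp_scsi_dev inside every scsi_device, the replacement home for the per-LUN state previously kept in struct zfcp_unit.
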
348static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 210static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
349{ 211{
350 adapter->pool.erp_req = 212 adapter->pool.erp_req =
@@ -374,18 +236,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
374 return -ENOMEM; 236 return -ENOMEM;
375 237
376 adapter->pool.qtcb_pool = 238 adapter->pool.qtcb_pool =
377 mempool_create_slab_pool(4, zfcp_data.qtcb_cache); 239 mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
378 if (!adapter->pool.qtcb_pool) 240 if (!adapter->pool.qtcb_pool)
379 return -ENOMEM; 241 return -ENOMEM;
380 242
381 adapter->pool.status_read_data = 243 BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
382 mempool_create_slab_pool(FSF_STATUS_READS_RECOM, 244 adapter->pool.sr_data =
383 zfcp_data.sr_buffer_cache); 245 mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
384 if (!adapter->pool.status_read_data) 246 if (!adapter->pool.sr_data)
385 return -ENOMEM; 247 return -ENOMEM;
386 248
387 adapter->pool.gid_pn = 249 adapter->pool.gid_pn =
388 mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); 250 mempool_create_slab_pool(1, zfcp_fc_req_cache);
389 if (!adapter->pool.gid_pn) 251 if (!adapter->pool.gid_pn)
390 return -ENOMEM; 252 return -ENOMEM;
391 253
@@ -404,8 +266,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
404 mempool_destroy(adapter->pool.qtcb_pool); 266 mempool_destroy(adapter->pool.qtcb_pool);
405 if (adapter->pool.status_read_req) 267 if (adapter->pool.status_read_req)
406 mempool_destroy(adapter->pool.status_read_req); 268 mempool_destroy(adapter->pool.status_read_req);
407 if (adapter->pool.status_read_data) 269 if (adapter->pool.sr_data)
408 mempool_destroy(adapter->pool.status_read_data); 270 mempool_destroy(adapter->pool.sr_data);
409 if (adapter->pool.gid_pn) 271 if (adapter->pool.gid_pn)
410 mempool_destroy(adapter->pool.gid_pn); 272 mempool_destroy(adapter->pool.gid_pn);
411} 273}
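
The status-read data pool switches from a slab-backed mempool to a page-backed one, which is only safe because a struct fsf_status_read_buffer fits in one page; the BUILD_BUG_ON pins that assumption at compile time. The same compile-time check, modeled portably with the negative-array-size trick and stand-in sizes rather than the real struct:

#define MODEL_PAGE_SIZE 4096

struct model_sr_buffer {
	unsigned char data[512];	/* stand-in for the real layout */
};

/* compiles only while the buffer fits a page, like BUILD_BUG_ON() */
typedef char model_sr_fits_page[
	(sizeof(struct model_sr_buffer) <= MODEL_PAGE_SIZE) ? 1 : -1];
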
@@ -425,8 +287,7 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
425 if (zfcp_fsf_status_read(adapter->qdio)) { 287 if (zfcp_fsf_status_read(adapter->qdio)) {
426 if (atomic_read(&adapter->stat_miss) >= 288 if (atomic_read(&adapter->stat_miss) >=
427 adapter->stat_read_buf_num) { 289 adapter->stat_read_buf_num) {
428 zfcp_erp_adapter_reopen(adapter, 0, "axsref1", 290 zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
429 NULL);
430 return 1; 291 return 1;
431 } 292 }
432 break; 293 break;
@@ -501,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
501 362
502 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); 363 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
503 INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports); 364 INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
365 INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
504 366
505 if (zfcp_qdio_setup(adapter)) 367 if (zfcp_qdio_setup(adapter))
506 goto failed; 368 goto failed;
@@ -552,7 +414,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
552 adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; 414 adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
553 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; 415 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
554 416
555 if (!zfcp_adapter_scsi_register(adapter)) 417 if (!zfcp_scsi_adapter_register(adapter))
556 return adapter; 418 return adapter;
557 419
558failed: 420failed:
@@ -566,14 +428,15 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
566 428
567 cancel_work_sync(&adapter->scan_work); 429 cancel_work_sync(&adapter->scan_work);
568 cancel_work_sync(&adapter->stat_work); 430 cancel_work_sync(&adapter->stat_work);
431 cancel_work_sync(&adapter->ns_up_work);
569 zfcp_destroy_adapter_work_queue(adapter); 432 zfcp_destroy_adapter_work_queue(adapter);
570 433
571 zfcp_fc_wka_ports_force_offline(adapter->gs); 434 zfcp_fc_wka_ports_force_offline(adapter->gs);
572 zfcp_adapter_scsi_unregister(adapter); 435 zfcp_scsi_adapter_unregister(adapter);
573 sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); 436 sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
574 437
575 zfcp_erp_thread_kill(adapter); 438 zfcp_erp_thread_kill(adapter);
576 zfcp_dbf_adapter_unregister(adapter->dbf); 439 zfcp_dbf_adapter_unregister(adapter);
577 zfcp_qdio_destroy(adapter->qdio); 440 zfcp_qdio_destroy(adapter->qdio);
578 441
579 zfcp_ccw_adapter_put(adapter); /* final put to release */ 442 zfcp_ccw_adapter_put(adapter); /* final put to release */
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index ce1cc7a11fb4..e8b7cee62046 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -46,10 +46,9 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
46 if (!adapter) 46 if (!adapter)
47 return 0; 47 return 0;
48 48
49 zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, 49 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
50 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
51 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 50 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
52 "ccresu2", NULL); 51 "ccresu2");
53 zfcp_erp_wait(adapter); 52 zfcp_erp_wait(adapter);
54 flush_work(&adapter->scan_work); 53 flush_work(&adapter->scan_work);
55 54
@@ -164,14 +163,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
164 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); 163 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
165 adapter->req_no = 0; 164 adapter->req_no = 0;
166 165
167 zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, 166 zfcp_ccw_activate(cdev);
168 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
169 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
170 "ccsonl2", NULL);
171 zfcp_erp_wait(adapter);
172
173 flush_work(&adapter->scan_work);
174
175 zfcp_ccw_adapter_put(adapter); 167 zfcp_ccw_adapter_put(adapter);
176 return 0; 168 return 0;
177} 169}
@@ -190,7 +182,7 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
190 if (!adapter) 182 if (!adapter)
191 return 0; 183 return 0;
192 184
193 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); 185 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
194 zfcp_erp_wait(adapter); 186 zfcp_erp_wait(adapter);
195 187
196 zfcp_ccw_adapter_put(adapter); 188 zfcp_ccw_adapter_put(adapter);
@@ -215,25 +207,24 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
215 switch (event) { 207 switch (event) {
216 case CIO_GONE: 208 case CIO_GONE:
217 dev_warn(&cdev->dev, "The FCP device has been detached\n"); 209 dev_warn(&cdev->dev, "The FCP device has been detached\n");
218 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL); 210 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
219 break; 211 break;
220 case CIO_NO_PATH: 212 case CIO_NO_PATH:
221 dev_warn(&cdev->dev, 213 dev_warn(&cdev->dev,
222 "The CHPID for the FCP device is offline\n"); 214 "The CHPID for the FCP device is offline\n");
223 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL); 215 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
224 break; 216 break;
225 case CIO_OPER: 217 case CIO_OPER:
226 dev_info(&cdev->dev, "The FCP device is operational again\n"); 218 dev_info(&cdev->dev, "The FCP device is operational again\n");
227 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, 219 zfcp_erp_set_adapter_status(adapter,
228 ZFCP_STATUS_COMMON_RUNNING, 220 ZFCP_STATUS_COMMON_RUNNING);
229 ZFCP_SET);
230 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 221 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
231 "ccnoti4", NULL); 222 "ccnoti4");
232 break; 223 break;
233 case CIO_BOXED: 224 case CIO_BOXED:
234 dev_warn(&cdev->dev, "The FCP device did not respond within " 225 dev_warn(&cdev->dev, "The FCP device did not respond within "
235 "the specified time\n"); 226 "the specified time\n");
236 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); 227 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
237 break; 228 break;
238 } 229 }
239 230
@@ -252,7 +243,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
252 if (!adapter) 243 if (!adapter)
253 return; 244 return;
254 245
255 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); 246 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
256 zfcp_erp_wait(adapter); 247 zfcp_erp_wait(adapter);
257 zfcp_erp_thread_kill(adapter); 248 zfcp_erp_thread_kill(adapter);
258 249
@@ -260,8 +251,10 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
260} 251}
261 252
262struct ccw_driver zfcp_ccw_driver = { 253struct ccw_driver zfcp_ccw_driver = {
263 .owner = THIS_MODULE, 254 .driver = {
264 .name = "zfcp", 255 .owner = THIS_MODULE,
256 .name = "zfcp",
257 },
265 .ids = zfcp_ccw_device_id, 258 .ids = zfcp_ccw_device_id,
266 .probe = zfcp_ccw_probe, 259 .probe = zfcp_ccw_probe,
267 .remove = zfcp_ccw_remove, 260 .remove = zfcp_ccw_remove,
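
The pattern repeated through this file, and in the zfcp hunks that follow, is an interface cleanup: the erp trigger functions drop their trailing reference argument (hence every call loses its final NULL), zfcp_erp_modify_adapter_status() with an explicit ZFCP_SET flag becomes zfcp_erp_set_adapter_status(), and set_online is reduced to a call of the activate helper it previously duplicated. The ccw_driver change at the end tracks a core API move of the owner and name fields into an embedded struct device_driver.
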
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index fcbd2b756da4..303dde09d294 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -2,9 +2,10 @@
2 * zfcp device driver 2 * zfcp device driver
3 * 3 *
4 * Userspace interface for accessing the 4 * Userspace interface for accessing the
5 * Access Control Lists / Control File Data Channel 5 * Access Control Lists / Control File Data Channel;
6 * handling of response code and states for ports and LUNs.
6 * 7 *
7 * Copyright IBM Corporation 2008, 2009 8 * Copyright IBM Corporation 2008, 2010
8 */ 9 */
9 10
10#define KMSG_COMPONENT "zfcp" 11#define KMSG_COMPONENT "zfcp"
@@ -251,8 +252,9 @@ static const struct file_operations zfcp_cfdc_fops = {
251 .open = nonseekable_open, 252 .open = nonseekable_open,
252 .unlocked_ioctl = zfcp_cfdc_dev_ioctl, 253 .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
253#ifdef CONFIG_COMPAT 254#ifdef CONFIG_COMPAT
254 .compat_ioctl = zfcp_cfdc_dev_ioctl 255 .compat_ioctl = zfcp_cfdc_dev_ioctl,
255#endif 256#endif
257 .llseek = no_llseek,
256}; 258};
257 259
258struct miscdevice zfcp_cfdc_misc = { 260struct miscdevice zfcp_cfdc_misc = {
@@ -260,3 +262,184 @@ struct miscdevice zfcp_cfdc_misc = {
260 .name = "zfcp_cfdc", 262 .name = "zfcp_cfdc",
261 .fops = &zfcp_cfdc_fops, 263 .fops = &zfcp_cfdc_fops,
262}; 264};
265
266/**
267 * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT
268 * @adapter: Adapter where the Access Control Table (ACT) changed
269 *
270 * After a change in the adapter ACT, check if access to any
271 * previously denied resources is now possible.
272 */
273void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
274{
275 unsigned long flags;
276 struct zfcp_port *port;
277 struct scsi_device *sdev;
278 struct zfcp_scsi_dev *zfcp_sdev;
279 int status;
280
281 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
282 return;
283
284 read_lock_irqsave(&adapter->port_list_lock, flags);
285 list_for_each_entry(port, &adapter->port_list, list) {
286 status = atomic_read(&port->status);
287 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
288 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
289 zfcp_erp_port_reopen(port,
290 ZFCP_STATUS_COMMON_ERP_FAILED,
291 "cfaac_1");
292 }
293 read_unlock_irqrestore(&adapter->port_list_lock, flags);
294
295	shost_for_each_device(sdev, adapter->scsi_host) {
296 zfcp_sdev = sdev_to_zfcp(sdev);
297 status = atomic_read(&zfcp_sdev->status);
298 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
299 (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
300 zfcp_erp_lun_reopen(sdev,
301 ZFCP_STATUS_COMMON_ERP_FAILED,
302 "cfaac_2");
303 }
304}
305
306static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
307{
308 u16 subtable = table >> 16;
309 u16 rule = table & 0xffff;
310 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
311
312 if (subtable && subtable < ARRAY_SIZE(act_type))
313 dev_warn(&adapter->ccw_device->dev,
314 "Access denied according to ACT rule type %s, "
315 "rule %d\n", act_type[subtable], rule);
316}
317
318/**
319 * zfcp_cfdc_port_denied - Process "access denied" for port
320 * @port: The port where the access has been denied
321 * @qual: The FSF status qualifier for the access denied FSF status
322 */
323void zfcp_cfdc_port_denied(struct zfcp_port *port,
324 union fsf_status_qual *qual)
325{
326 dev_warn(&port->adapter->ccw_device->dev,
327 "Access denied to port 0x%016Lx\n",
328 (unsigned long long)port->wwpn);
329
330 zfcp_act_eval_err(port->adapter, qual->halfword[0]);
331 zfcp_act_eval_err(port->adapter, qual->halfword[1]);
332 zfcp_erp_set_port_status(port,
333 ZFCP_STATUS_COMMON_ERP_FAILED |
334 ZFCP_STATUS_COMMON_ACCESS_DENIED);
335}
336
337/**
338 * zfcp_cfdc_lun_denied - Process "access denied" for LUN
339 * @sdev: The SCSI device / LUN where the access has been denied
340 * @qual: The FSF status qualifier for the access denied FSF status
341 */
342void zfcp_cfdc_lun_denied(struct scsi_device *sdev,
343 union fsf_status_qual *qual)
344{
345 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
346
347 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
348 "Access denied to LUN 0x%016Lx on port 0x%016Lx\n",
349 zfcp_scsi_dev_lun(sdev),
350 (unsigned long long)zfcp_sdev->port->wwpn);
351 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]);
352 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]);
353 zfcp_erp_set_lun_status(sdev,
354 ZFCP_STATUS_COMMON_ERP_FAILED |
355 ZFCP_STATUS_COMMON_ACCESS_DENIED);
356
357 atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
358 atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
359}
360
361/**
362 * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status
363 * @sdev: The LUN / SCSI device where sharing violation occurred
364 * @qual: The FSF status qualifier from the LUN sharing violation
365 */
366void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev,
367 union fsf_status_qual *qual)
368{
369 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
370
371 if (qual->word[0])
372 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
373 "LUN 0x%Lx on port 0x%Lx is already in "
374 "use by CSS%d, MIF Image ID %x\n",
375 zfcp_scsi_dev_lun(sdev),
376 (unsigned long long)zfcp_sdev->port->wwpn,
377 qual->fsf_queue_designator.cssid,
378 qual->fsf_queue_designator.hla);
379 else
380 zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]);
381
382 zfcp_erp_set_lun_status(sdev,
383 ZFCP_STATUS_COMMON_ERP_FAILED |
384 ZFCP_STATUS_COMMON_ACCESS_DENIED);
385 atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
386 atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
387}
388
389/**
390 * zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun"
391 * @sdev: The SCSI device / LUN where to evaluate the status
392 * @bottom: The qtcb bottom with the status from the "open lun"
393 *
394 * Returns: 0 if LUN is usable, -EACCES if the access control table
395 * reports an unsupported configuration.
396 */
397int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
398 struct fsf_qtcb_bottom_support *bottom)
399{
400 int shared, rw;
401 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
402 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
403
404 if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) ||
405 !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) ||
406 zfcp_ccw_priv_sch(adapter))
407 return 0;
408
409 shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE);
410 rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
411
412 if (shared)
413 atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
414
415 if (!rw) {
416 atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
417 dev_info(&adapter->ccw_device->dev, "SCSI device at LUN "
418 "0x%016Lx on port 0x%016Lx opened read-only\n",
419 zfcp_scsi_dev_lun(sdev),
420 (unsigned long long)zfcp_sdev->port->wwpn);
421 }
422
423 if (!shared && !rw) {
424 dev_err(&adapter->ccw_device->dev, "Exclusive read-only access "
425 "not supported (LUN 0x%016Lx, port 0x%016Lx)\n",
426 zfcp_scsi_dev_lun(sdev),
427 (unsigned long long)zfcp_sdev->port->wwpn);
428 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
429 zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
430 return -EACCES;
431 }
432
433 if (shared && rw) {
434 dev_err(&adapter->ccw_device->dev,
435 "Shared read-write access not supported "
436 "(LUN 0x%016Lx, port 0x%016Lx)\n",
437 zfcp_scsi_dev_lun(sdev),
438 (unsigned long long)zfcp_sdev->port->wwpn);
439 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
440 zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
441 return -EACCES;
442 }
443
444 return 0;
445}
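
zfcp_act_eval_err() decodes the FSF status qualifier word it is handed: the upper halfword selects the ACT subtable ("OS", "WWPN", "DID", "LUN"), the lower halfword the rule within it, and out-of-range subtables are silently ignored. The decoding in isolation:

#include <stdint.h>

static void model_decode_act(uint32_t table, uint16_t *subtable,
			     uint16_t *rule)
{
	*subtable = table >> 16;	/* 1..4 map to OS/WWPN/DID/LUN */
	*rule = table & 0xffff;	/* rule index within the subtable */
}
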
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a86117b0d6e1..96d1462e0bf5 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Debug traces for zfcp. 4 * Debug traces for zfcp.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -22,982 +22,392 @@ module_param(dbfsize, uint, 0400);
22MODULE_PARM_DESC(dbfsize, 22MODULE_PARM_DESC(dbfsize,
23 "number of pages for each debug feature area (default 4)"); 23 "number of pages for each debug feature area (default 4)");
24 24
25static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, 25static inline unsigned int zfcp_dbf_plen(unsigned int offset)
26 int level, char *from, int from_len)
27{ 26{
28 int offset; 27 return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
29 struct zfcp_dbf_dump *dump = to;
30 int room = to_len - sizeof(*dump);
31
32 for (offset = 0; offset < from_len; offset += dump->size) {
33 memset(to, 0, to_len);
34 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
35 dump->total_size = from_len;
36 dump->offset = offset;
37 dump->size = min(from_len - offset, room);
38 memcpy(dump->data, from + offset, dump->size);
39 debug_event(dbf, level, dump, dump->size + sizeof(*dump));
40 }
41} 28}
42 29
43static void zfcp_dbf_tag(char **p, const char *label, const char *tag) 30static inline
31void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
32 u64 req_id)
44{ 33{
45 int i; 34 struct zfcp_dbf_pay *pl = &dbf->pay_buf;
46 35 u16 offset = 0, rec_length;
47 *p += sprintf(*p, "%-24s", label);
48 for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
49 *p += sprintf(*p, "%c", tag[i]);
50 *p += sprintf(*p, "\n");
51}
52 36
53static void zfcp_dbf_outs(char **buf, const char *s1, const char *s2) 37 spin_lock(&dbf->pay_lock);
54{ 38 memset(pl, 0, sizeof(*pl));
55 *buf += sprintf(*buf, "%-24s%s\n", s1, s2); 39 pl->fsf_req_id = req_id;
56} 40 memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);
57 41
58static void zfcp_dbf_out(char **buf, const char *s, const char *format, ...) 42 while (offset < length) {
59{ 43 rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
60 va_list arg; 44 (u16) (length - offset));
45 memcpy(pl->data, data + offset, rec_length);
46 debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
61 47
62 *buf += sprintf(*buf, "%-24s", s); 48 offset += rec_length;
63 va_start(arg, format); 49 pl->counter++;
64 *buf += vsprintf(*buf, format, arg);
65 va_end(arg);
66 *buf += sprintf(*buf, "\n");
67}
68
69static void zfcp_dbf_outd(char **p, const char *label, char *buffer,
70 int buflen, int offset, int total_size)
71{
72 if (!offset)
73 *p += sprintf(*p, "%-24s ", label);
74 while (buflen--) {
75 if (offset > 0) {
76 if ((offset % 32) == 0)
77 *p += sprintf(*p, "\n%-24c ", ' ');
78 else if ((offset % 4) == 0)
79 *p += sprintf(*p, " ");
80 }
81 *p += sprintf(*p, "%02x", *buffer++);
82 if (++offset == total_size) {
83 *p += sprintf(*p, "\n");
84 break;
85 }
86 } 50 }
87 if (!total_size)
88 *p += sprintf(*p, "\n");
89}
90 51
91static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, 52 spin_unlock(&dbf->pay_lock);
92 int area, debug_entry_t *entry, char *out_buf)
93{
94 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
95 struct timespec t;
96 char *p = out_buf;
97
98 if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
99 stck_to_timespec(entry->id.stck, &t);
100 zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu",
101 t.tv_sec, t.tv_nsec);
102 zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
103 } else {
104 zfcp_dbf_outd(&p, "", dump->data, dump->size, dump->offset,
105 dump->total_size);
106 if ((dump->offset + dump->size) == dump->total_size)
107 p += sprintf(p, "\n");
108 }
109 return p - out_buf;
110} 53}
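
zfcp_dbf_pl_write() above replaces the old hexdump helper: a payload of arbitrary length is copied out in ZFCP_DBF_PAY_MAX_REC-sized chunks, each emitted as its own trace record carrying the request id and a running counter so the pieces can be reassembled offline. A self-contained model of the chunking loop, with a stub in place of debug_event():

#include <string.h>

#define MAX_REC 256	/* stand-in for ZFCP_DBF_PAY_MAX_REC */

static void stub_emit_chunk(const void *p, unsigned int len)
{
	(void)p; (void)len;	/* real code: one debug_event() per chunk */
}

static void model_pl_write(const char *data, unsigned int length)
{
	unsigned int offset = 0, rec, counter = 0;
	char chunk[MAX_REC];

	while (offset < length) {
		rec = length - offset < MAX_REC ? length - offset : MAX_REC;
		memcpy(chunk, data + offset, rec);
		stub_emit_chunk(chunk, rec);
		offset += rec;
		counter++;	/* lets a reader re-order the chunks */
	}
	(void)counter;
}
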
111 54
112void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, 55/**
113 struct zfcp_fsf_req *fsf_req, 56 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
114 struct zfcp_dbf *dbf) 57 * @tag: tag indicating which kind of unsolicited status has been received
58 * @req: request for which a response was received
59 */
60void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
115{ 61{
116 struct fsf_qtcb *qtcb = fsf_req->qtcb; 62 struct zfcp_dbf *dbf = req->adapter->dbf;
117 union fsf_prot_status_qual *prot_status_qual = 63 struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
118 &qtcb->prefix.prot_status_qual; 64 struct fsf_qtcb_header *q_head = &req->qtcb->header;
119 union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual; 65 struct zfcp_dbf_hba *rec = &dbf->hba_buf;
120 struct scsi_cmnd *scsi_cmnd;
121 struct zfcp_port *port;
122 struct zfcp_unit *unit;
123 struct zfcp_send_els *send_els;
124 struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
125 struct zfcp_dbf_hba_record_response *response = &rec->u.response;
126 unsigned long flags; 66 unsigned long flags;
127 67
128 spin_lock_irqsave(&dbf->hba_lock, flags); 68 spin_lock_irqsave(&dbf->hba_lock, flags);
129 memset(rec, 0, sizeof(*rec)); 69 memset(rec, 0, sizeof(*rec));
130 strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
131 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
132
133 response->fsf_command = fsf_req->fsf_command;
134 response->fsf_reqid = fsf_req->req_id;
135 response->fsf_seqno = fsf_req->seq_no;
136 response->fsf_issued = fsf_req->issued;
137 response->fsf_prot_status = qtcb->prefix.prot_status;
138 response->fsf_status = qtcb->header.fsf_status;
139 memcpy(response->fsf_prot_status_qual,
140 prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
141 memcpy(response->fsf_status_qual,
142 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
143 response->fsf_req_status = fsf_req->status;
144 response->sbal_first = fsf_req->qdio_req.sbal_first;
145 response->sbal_last = fsf_req->qdio_req.sbal_last;
146 response->sbal_response = fsf_req->qdio_req.sbal_response;
147 response->pool = fsf_req->pool != NULL;
148 response->erp_action = (unsigned long)fsf_req->erp_action;
149
150 switch (fsf_req->fsf_command) {
151 case FSF_QTCB_FCP_CMND:
152 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
153 break;
154 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
155 if (scsi_cmnd) {
156 response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
157 response->u.fcp.serial = scsi_cmnd->serial_number;
158 response->u.fcp.data_dir =
159 qtcb->bottom.io.data_direction;
160 }
161 break;
162
163 case FSF_QTCB_OPEN_PORT_WITH_DID:
164 case FSF_QTCB_CLOSE_PORT:
165 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
166 port = (struct zfcp_port *)fsf_req->data;
167 response->u.port.wwpn = port->wwpn;
168 response->u.port.d_id = port->d_id;
169 response->u.port.port_handle = qtcb->header.port_handle;
170 break;
171
172 case FSF_QTCB_OPEN_LUN:
173 case FSF_QTCB_CLOSE_LUN:
174 unit = (struct zfcp_unit *)fsf_req->data;
175 port = unit->port;
176 response->u.unit.wwpn = port->wwpn;
177 response->u.unit.fcp_lun = unit->fcp_lun;
178 response->u.unit.port_handle = qtcb->header.port_handle;
179 response->u.unit.lun_handle = qtcb->header.lun_handle;
180 break;
181
182 case FSF_QTCB_SEND_ELS:
183 send_els = (struct zfcp_send_els *)fsf_req->data;
184 response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id);
185 break;
186
187 case FSF_QTCB_ABORT_FCP_CMND:
188 case FSF_QTCB_SEND_GENERIC:
189 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
190 case FSF_QTCB_EXCHANGE_PORT_DATA:
191 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
192 case FSF_QTCB_UPLOAD_CONTROL_FILE:
193 break;
194 }
195
196 debug_event(dbf->hba, level, rec, sizeof(*rec));
197 70
198 /* have fcp channel microcode fixed to use as little as possible */ 71 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
199 if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { 72 rec->id = ZFCP_DBF_HBA_RES;
200 /* adjust length skipping trailing zeros */ 73 rec->fsf_req_id = req->req_id;
201 char *buf = (char *)qtcb + qtcb->header.log_start; 74 rec->fsf_req_status = req->status;
202 int len = qtcb->header.log_length; 75 rec->fsf_cmd = req->fsf_command;
203 for (; len && !buf[len - 1]; len--); 76 rec->fsf_seq_no = req->seq_no;
204 zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf, 77 rec->u.res.req_issued = req->issued;
205 len); 78 rec->u.res.prot_status = q_pref->prot_status;
79 rec->u.res.fsf_status = q_head->fsf_status;
80
81 memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
82 FSF_PROT_STATUS_QUAL_SIZE);
83 memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
84 FSF_STATUS_QUALIFIER_SIZE);
85
86 if (req->fsf_command != FSF_QTCB_FCP_CMND) {
87 rec->pl_len = q_head->log_length;
88 zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
89 rec->pl_len, "fsf_res", req->req_id);
206 } 90 }
207 91
208 spin_unlock_irqrestore(&dbf->hba_lock, flags); 92 debug_event(dbf->hba, 1, rec, sizeof(*rec));
209}
210
211void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf,
212 struct fsf_status_read_buffer *status_buffer)
213{
214 struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
215 unsigned long flags;
216
217 spin_lock_irqsave(&dbf->hba_lock, flags);
218 memset(rec, 0, sizeof(*rec));
219 strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
220 strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
221
222 rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss);
223 if (status_buffer != NULL) {
224 rec->u.status.status_type = status_buffer->status_type;
225 rec->u.status.status_subtype = status_buffer->status_subtype;
226 memcpy(&rec->u.status.queue_designator,
227 &status_buffer->queue_designator,
228 sizeof(struct fsf_queue_designator));
229
230 switch (status_buffer->status_type) {
231 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
232 rec->u.status.payload_size =
233 ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
234 break;
235
236 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
237 rec->u.status.payload_size =
238 ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
239 break;
240
241 case FSF_STATUS_READ_LINK_DOWN:
242 switch (status_buffer->status_subtype) {
243 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
244 case FSF_STATUS_READ_SUB_FDISC_FAILED:
245 rec->u.status.payload_size =
246 sizeof(struct fsf_link_down_info);
247 }
248 break;
249
250 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
251 rec->u.status.payload_size =
252 ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
253 break;
254 }
255 memcpy(&rec->u.status.payload,
256 &status_buffer->payload, rec->u.status.payload_size);
257 }
258
259 debug_event(dbf->hba, level, rec, sizeof(*rec));
260 spin_unlock_irqrestore(&dbf->hba_lock, flags); 93 spin_unlock_irqrestore(&dbf->hba_lock, flags);
261} 94}
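Editor's note: the reworked response trace takes a fixed-width tag and derives everything else from the request itself. A minimal sketch of a call site, assuming the declarations from zfcp_ext.h; the wrapper name and the tag "fs_norm" are illustrative, not part of this patch, and tags are expected to be ZFCP_DBF_TAG_LEN (7) characters since memcpy() copies exactly that many bytes into the record:

/* hedged sketch of a caller in zfcp_fsf.c, not driver code */
static void example_trace_response(struct zfcp_fsf_req *req)
{
	if (req->qtcb->header.fsf_status != FSF_GOOD)
		zfcp_dbf_hba_fsf_res("fs_norm", req);
}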
262 95
263/** 96/**
264 * zfcp_dbf_hba_qdio - trace event for QDIO related failure 97 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
265 * @dbf: dbf structure affected by this QDIO related event 98 * @tag: tag indicating which kind of unsolicited status has been received
266 * @qdio_error: as passed by qdio module 99 * @req: request providing the unsolicited status
267 * @sbal_index: first buffer with error condition, as passed by qdio module
268 * @sbal_count: number of buffers affected, as passed by qdio module
269 */ 100 */
270void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error, 101void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
271 int sbal_index, int sbal_count)
272{ 102{
273 struct zfcp_dbf_hba_record *r = &dbf->hba_buf; 103 struct zfcp_dbf *dbf = req->adapter->dbf;
104 struct fsf_status_read_buffer *srb = req->data;
105 struct zfcp_dbf_hba *rec = &dbf->hba_buf;
274 unsigned long flags; 106 unsigned long flags;
275 107
276 spin_lock_irqsave(&dbf->hba_lock, flags); 108 spin_lock_irqsave(&dbf->hba_lock, flags);
277 memset(r, 0, sizeof(*r)); 109 memset(rec, 0, sizeof(*rec));
278 strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); 110
279 r->u.qdio.qdio_error = qdio_error; 111 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
280 r->u.qdio.sbal_index = sbal_index; 112 rec->id = ZFCP_DBF_HBA_USS;
281 r->u.qdio.sbal_count = sbal_count; 113 rec->fsf_req_id = req->req_id;
282 debug_event(dbf->hba, 0, r, sizeof(*r)); 114 rec->fsf_req_status = req->status;
115 rec->fsf_cmd = req->fsf_command;
116
117 if (!srb)
118 goto log;
119
120 rec->u.uss.status_type = srb->status_type;
121 rec->u.uss.status_subtype = srb->status_subtype;
122 rec->u.uss.d_id = ntoh24(srb->d_id);
123 rec->u.uss.lun = srb->fcp_lun;
124 memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
125 sizeof(rec->u.uss.queue_designator));
126
127 /* status read buffer payload length */
128 rec->pl_len = (!srb->length) ? 0 : srb->length -
129 offsetof(struct fsf_status_read_buffer, payload);
130
131 if (rec->pl_len)
132 zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
133 "fsf_uss", req->req_id);
134log:
135 debug_event(dbf->hba, 2, rec, sizeof(*rec));
283 spin_unlock_irqrestore(&dbf->hba_lock, flags); 136 spin_unlock_irqrestore(&dbf->hba_lock, flags);
284} 137}
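Editor's note: the unsolicited-status variant tolerates req->data being NULL and computes the payload length from the status read buffer itself. A hedged sketch of how a status-read completion handler might invoke it; the function name and tag are invented for illustration:

/* sketch only: srb may be absent, zfcp_dbf_hba_fsf_uss() handles NULL */
static void example_trace_unsol(struct zfcp_fsf_req *req)
{
	zfcp_dbf_hba_fsf_uss("fssrh_1", req);
}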
285 138
286/** 139/**
287 * zfcp_dbf_hba_berr - trace event for bit error threshold 140 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
288 * @dbf: reference to dbf structure 141 * @tag: identifier for event
289 * @req: fsf request 142 * @req: request which caused the bit_error condition
290 */ 143 */
291void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req) 144void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
292{ 145{
293 struct zfcp_dbf_hba_record *r = &dbf->hba_buf; 146 struct zfcp_dbf *dbf = req->adapter->dbf;
147 struct zfcp_dbf_hba *rec = &dbf->hba_buf;
294 struct fsf_status_read_buffer *sr_buf = req->data; 148 struct fsf_status_read_buffer *sr_buf = req->data;
295 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
296 unsigned long flags; 149 unsigned long flags;
297 150
298 spin_lock_irqsave(&dbf->hba_lock, flags); 151 spin_lock_irqsave(&dbf->hba_lock, flags);
299 memset(r, 0, sizeof(*r)); 152 memset(rec, 0, sizeof(*rec));
300 strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
301 memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
302 debug_event(dbf->hba, 0, r, sizeof(*r));
303 spin_unlock_irqrestore(&dbf->hba_lock, flags);
304}
305static void zfcp_dbf_hba_view_response(char **p,
306 struct zfcp_dbf_hba_record_response *r)
307{
308 struct timespec t;
309
310 zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command);
311 zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
312 zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno);
313 stck_to_timespec(r->fsf_issued, &t);
314 zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
315 zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status);
316 zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status);
317 zfcp_dbf_outd(p, "fsf_prot_status_qual", r->fsf_prot_status_qual,
318 FSF_PROT_STATUS_QUAL_SIZE, 0, FSF_PROT_STATUS_QUAL_SIZE);
319 zfcp_dbf_outd(p, "fsf_status_qual", r->fsf_status_qual,
320 FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
321 zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
322 zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
323 zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
324 zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
325 zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
326
327 switch (r->fsf_command) {
328 case FSF_QTCB_FCP_CMND:
329 if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
330 break;
331 zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
332 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
333 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
334 *p += sprintf(*p, "\n");
335 break;
336
337 case FSF_QTCB_OPEN_PORT_WITH_DID:
338 case FSF_QTCB_CLOSE_PORT:
339 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
340 zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.port.wwpn);
341 zfcp_dbf_out(p, "d_id", "0x%06x", r->u.port.d_id);
342 zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.port.port_handle);
343 break;
344
345 case FSF_QTCB_OPEN_LUN:
346 case FSF_QTCB_CLOSE_LUN:
347 zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.unit.wwpn);
348 zfcp_dbf_out(p, "fcp_lun", "0x%016Lx", r->u.unit.fcp_lun);
349 zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.unit.port_handle);
350 zfcp_dbf_out(p, "lun_handle", "0x%08x", r->u.unit.lun_handle);
351 break;
352
353 case FSF_QTCB_SEND_ELS:
354 zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
355 break;
356
357 case FSF_QTCB_ABORT_FCP_CMND:
358 case FSF_QTCB_SEND_GENERIC:
359 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
360 case FSF_QTCB_EXCHANGE_PORT_DATA:
361 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
362 case FSF_QTCB_UPLOAD_CONTROL_FILE:
363 break;
364 }
365}
366
367static void zfcp_dbf_hba_view_status(char **p,
368 struct zfcp_dbf_hba_record_status *r)
369{
370 zfcp_dbf_out(p, "failed", "0x%02x", r->failed);
371 zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type);
372 zfcp_dbf_out(p, "status_subtype", "0x%08x", r->status_subtype);
373 zfcp_dbf_outd(p, "queue_designator", (char *)&r->queue_designator,
374 sizeof(struct fsf_queue_designator), 0,
375 sizeof(struct fsf_queue_designator));
376 zfcp_dbf_outd(p, "payload", (char *)&r->payload, r->payload_size, 0,
377 r->payload_size);
378}
379
380static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r)
381{
382 zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
383 zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
384 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
385}
386 153
387static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r) 154 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
388{ 155 rec->id = ZFCP_DBF_HBA_BIT;
389 zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count); 156 rec->fsf_req_id = req->req_id;
390 zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count); 157 rec->fsf_req_status = req->status;
391 zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count); 158 rec->fsf_cmd = req->fsf_command;
392 zfcp_dbf_out(p, "prim_seq_err", "%d", 159 memcpy(&rec->u.be, &sr_buf->payload.bit_error,
393 r->primitive_sequence_error_count); 160 sizeof(struct fsf_bit_error_payload));
394 zfcp_dbf_out(p, "inval_trans_word_err", "%d",
395 r->invalid_transmission_word_error_count);
396 zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
397 zfcp_dbf_out(p, "prim_seq_event_to", "%d",
398 r->primitive_sequence_event_timeout_count);
399 zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
400 r->elastic_buffer_overrun_error_count);
401 zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
402 r->advertised_receive_b2b_credit);
403 zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
404 r->current_receive_b2b_credit);
405 zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
406 r->advertised_transmit_b2b_credit);
407 zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
408 r->current_transmit_b2b_credit);
409}
410 161
411static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view, 162 debug_event(dbf->hba, 1, rec, sizeof(*rec));
412 char *out_buf, const char *in_buf) 163 spin_unlock_irqrestore(&dbf->hba_lock, flags);
413{
414 struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf;
415 char *p = out_buf;
416
417 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
418 return 0;
419
420 zfcp_dbf_tag(&p, "tag", r->tag);
421 if (isalpha(r->tag2[0]))
422 zfcp_dbf_tag(&p, "tag2", r->tag2);
423
424 if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
425 zfcp_dbf_hba_view_response(&p, &r->u.response);
426 else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
427 zfcp_dbf_hba_view_status(&p, &r->u.status);
428 else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
429 zfcp_dbf_hba_view_qdio(&p, &r->u.qdio);
430 else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
431 zfcp_dbf_hba_view_berr(&p, &r->u.berr);
432
433 if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0)
434 p += sprintf(p, "\n");
435 return p - out_buf;
436} 164}
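Editor's note: zfcp_dbf_hba_bit_err() copies the whole struct fsf_bit_error_payload into the record, so the caller only has to classify the unsolicited status. A sketch under that assumption; the dispatching function and tags are invented, the FSF_STATUS_READ_* constant is the driver's existing one:

static void example_dispatch_status(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *srb = req->data;

	if (srb->status_type == FSF_STATUS_READ_BIT_ERROR_THRESHOLD)
		zfcp_dbf_hba_bit_err("fssrh_b", req);	/* illustrative tag */
	else
		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
}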
437 165
438static struct debug_view zfcp_dbf_hba_view = { 166static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
439 .name = "structured", 167 struct zfcp_adapter *adapter,
440 .header_proc = zfcp_dbf_view_header, 168 struct zfcp_port *port,
441 .format_proc = zfcp_dbf_hba_view_format, 169 struct scsi_device *sdev)
442};
443
444static const char *zfcp_dbf_rec_tags[] = {
445 [ZFCP_REC_DBF_ID_THREAD] = "thread",
446 [ZFCP_REC_DBF_ID_TARGET] = "target",
447 [ZFCP_REC_DBF_ID_TRIGGER] = "trigger",
448 [ZFCP_REC_DBF_ID_ACTION] = "action",
449};
450
451static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
452 char *buf, const char *_rec)
453{ 170{
454 struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec; 171 rec->adapter_status = atomic_read(&adapter->status);
455 char *p = buf; 172 if (port) {
456 char hint[ZFCP_DBF_ID_SIZE + 1]; 173 rec->port_status = atomic_read(&port->status);
457 174 rec->wwpn = port->wwpn;
458 memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE); 175 rec->d_id = port->d_id;
459 hint[ZFCP_DBF_ID_SIZE] = 0; 176 }
460 zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]); 177 if (sdev) {
461 zfcp_dbf_outs(&p, "hint", hint); 178 rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
462 switch (r->id) { 179 rec->lun = zfcp_scsi_dev_lun(sdev);
463 case ZFCP_REC_DBF_ID_THREAD:
464 zfcp_dbf_out(&p, "total", "%d", r->u.thread.total);
465 zfcp_dbf_out(&p, "ready", "%d", r->u.thread.ready);
466 zfcp_dbf_out(&p, "running", "%d", r->u.thread.running);
467 break;
468 case ZFCP_REC_DBF_ID_TARGET:
469 zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.target.ref);
470 zfcp_dbf_out(&p, "status", "0x%08x", r->u.target.status);
471 zfcp_dbf_out(&p, "erp_count", "%d", r->u.target.erp_count);
472 zfcp_dbf_out(&p, "d_id", "0x%06x", r->u.target.d_id);
473 zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.target.wwpn);
474 zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.target.fcp_lun);
475 break;
476 case ZFCP_REC_DBF_ID_TRIGGER:
477 zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.trigger.ref);
478 zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.trigger.action);
479 zfcp_dbf_out(&p, "requested", "%d", r->u.trigger.want);
480 zfcp_dbf_out(&p, "executed", "%d", r->u.trigger.need);
481 zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.trigger.wwpn);
482 zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
483 zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
484 zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
485 zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us);
486 break;
487 case ZFCP_REC_DBF_ID_ACTION:
488 zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
489 zfcp_dbf_out(&p, "fsf_req", "0x%016Lx", r->u.action.fsf_req);
490 zfcp_dbf_out(&p, "status", "0x%08Lx", r->u.action.status);
491 zfcp_dbf_out(&p, "step", "0x%08Lx", r->u.action.step);
492 break;
493 } 180 }
494 p += sprintf(p, "\n");
495 return p - buf;
496} 181}
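Editor's note: because every caller memset()s the record first, zfcp_dbf_set_common() can simply skip NULL objects and the corresponding fields read back as zero, so one helper serves adapter-, port- and LUN-scoped events alike. Illustration only, since the helper is static to zfcp_dbf.c:

/* adapter-level event: wwpn, d_id, port and LUN status stay zero */
zfcp_dbf_set_common(rec, adapter, NULL, NULL);
/* LUN-level event: all three status words are filled in */
zfcp_dbf_set_common(rec, adapter, port, sdev);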
497 182
498static struct debug_view zfcp_dbf_rec_view = {
499 .name = "structured",
500 .header_proc = zfcp_dbf_view_header,
501 .format_proc = zfcp_dbf_rec_view_format,
502};
503
504/** 183/**
505 * zfcp_dbf_rec_thread - trace event related to recovery thread operation 184 * zfcp_dbf_rec_trig - trace event related to triggered recovery
506 * @id2: identifier for event 185 * @tag: identifier for event
507 * @dbf: reference to dbf structure 186 * @adapter: adapter on which the erp_action should run
508 * This function assumes that the caller is holding erp_lock. 187 * @port: remote port involved in the erp_action
188 * @sdev: scsi device involved in the erp_action
189 * @want: wanted erp_action
190 * @need: required erp_action
191 *
192 * The adapter->erp_lock has to be held.
509 */ 193 */
510void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf) 194void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
195 struct zfcp_port *port, struct scsi_device *sdev,
196 u8 want, u8 need)
511{ 197{
512 struct zfcp_adapter *adapter = dbf->adapter; 198 struct zfcp_dbf *dbf = adapter->dbf;
513 struct zfcp_dbf_rec_record *r = &dbf->rec_buf; 199 struct zfcp_dbf_rec *rec = &dbf->rec_buf;
514 unsigned long flags = 0;
515 struct list_head *entry; 200 struct list_head *entry;
516 unsigned ready = 0, running = 0, total;
517
518 list_for_each(entry, &adapter->erp_ready_head)
519 ready++;
520 list_for_each(entry, &adapter->erp_running_head)
521 running++;
522 total = adapter->erp_total_count;
523
524 spin_lock_irqsave(&dbf->rec_lock, flags);
525 memset(r, 0, sizeof(*r));
526 r->id = ZFCP_REC_DBF_ID_THREAD;
527 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
528 r->u.thread.total = total;
529 r->u.thread.ready = ready;
530 r->u.thread.running = running;
531 debug_event(dbf->rec, 6, r, sizeof(*r));
532 spin_unlock_irqrestore(&dbf->rec_lock, flags);
533}
534
535/**
536 * zfcp_dbf_rec_thread_lock - trace event related to recovery thread operation
537 * @id2: identifier for event
538 * @dbf: reference to dbf structure
539 * This function assumes that the caller does not hold erp_lock.
540 */
541void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf)
542{
543 struct zfcp_adapter *adapter = dbf->adapter;
544 unsigned long flags;
545
546 read_lock_irqsave(&adapter->erp_lock, flags);
547 zfcp_dbf_rec_thread(id2, dbf);
548 read_unlock_irqrestore(&adapter->erp_lock, flags);
549}
550
551static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf,
552 atomic_t *status, atomic_t *erp_count, u64 wwpn,
553 u32 d_id, u64 fcp_lun)
554{
555 struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
556 unsigned long flags; 201 unsigned long flags;
557 202
558 spin_lock_irqsave(&dbf->rec_lock, flags); 203 spin_lock_irqsave(&dbf->rec_lock, flags);
559 memset(r, 0, sizeof(*r)); 204 memset(rec, 0, sizeof(*rec));
560 r->id = ZFCP_REC_DBF_ID_TARGET;
561 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
562 r->u.target.ref = (unsigned long)ref;
563 r->u.target.status = atomic_read(status);
564 r->u.target.wwpn = wwpn;
565 r->u.target.d_id = d_id;
566 r->u.target.fcp_lun = fcp_lun;
567 r->u.target.erp_count = atomic_read(erp_count);
568 debug_event(dbf->rec, 3, r, sizeof(*r));
569 spin_unlock_irqrestore(&dbf->rec_lock, flags);
570}
571
572/**
573 * zfcp_dbf_rec_adapter - trace event for adapter state change
574 * @id: identifier for trigger of state change
575 * @ref: additional reference (e.g. request)
576 * @dbf: reference to dbf structure
577 */
578void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
579{
580 struct zfcp_adapter *adapter = dbf->adapter;
581
582 zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
583 &adapter->erp_counter, 0, 0,
584 ZFCP_DBF_INVALID_LUN);
585}
586
587/**
588 * zfcp_dbf_rec_port - trace event for port state change
589 * @id: identifier for trigger of state change
590 * @ref: additional reference (e.g. request)
591 * @port: port
592 */
593void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
594{
595 struct zfcp_dbf *dbf = port->adapter->dbf;
596 205
597 zfcp_dbf_rec_target(id, ref, dbf, &port->status, 206 rec->id = ZFCP_DBF_REC_TRIG;
598 &port->erp_counter, port->wwpn, port->d_id, 207 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
599 ZFCP_DBF_INVALID_LUN); 208 zfcp_dbf_set_common(rec, adapter, port, sdev);
600}
601 209
602/** 210 list_for_each(entry, &adapter->erp_ready_head)
603 * zfcp_dbf_rec_unit - trace event for unit state change 211 rec->u.trig.ready++;
604 * @id: identifier for trigger of state change
605 * @ref: additional reference (e.g. request)
606 * @unit: unit
607 */
608void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
609{
610 struct zfcp_port *port = unit->port;
611 struct zfcp_dbf *dbf = port->adapter->dbf;
612 212
613 zfcp_dbf_rec_target(id, ref, dbf, &unit->status, 213 list_for_each(entry, &adapter->erp_running_head)
614 &unit->erp_counter, port->wwpn, port->d_id, 214 rec->u.trig.running++;
615 unit->fcp_lun);
616}
617 215
618/** 216 rec->u.trig.want = want;
619 * zfcp_dbf_rec_trigger - trace event for triggered error recovery 217 rec->u.trig.need = need;
620 * @id2: identifier for error recovery trigger
621 * @ref: additional reference (e.g. request)
622 * @want: originally requested error recovery action
623 * @need: error recovery action actually initiated
624 * @action: address of error recovery action struct
625 * @adapter: adapter
626 * @port: port
627 * @unit: unit
628 */
629void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
630 struct zfcp_adapter *adapter, struct zfcp_port *port,
631 struct zfcp_unit *unit)
632{
633 struct zfcp_dbf *dbf = adapter->dbf;
634 struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
635 unsigned long flags;
636 218
637 spin_lock_irqsave(&dbf->rec_lock, flags); 219 debug_event(dbf->rec, 1, rec, sizeof(*rec));
638 memset(r, 0, sizeof(*r));
639 r->id = ZFCP_REC_DBF_ID_TRIGGER;
640 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
641 r->u.trigger.ref = (unsigned long)ref;
642 r->u.trigger.want = want;
643 r->u.trigger.need = need;
644 r->u.trigger.action = (unsigned long)action;
645 r->u.trigger.as = atomic_read(&adapter->status);
646 if (port) {
647 r->u.trigger.ps = atomic_read(&port->status);
648 r->u.trigger.wwpn = port->wwpn;
649 }
650 if (unit)
651 r->u.trigger.us = atomic_read(&unit->status);
652 r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
653 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
654 spin_unlock_irqrestore(&dbf->rec_lock, flags); 220 spin_unlock_irqrestore(&dbf->rec_lock, flags);
655} 221}
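Editor's note: since zfcp_dbf_rec_trig() walks both erp lists, the lock requirement in its kernel-doc is not optional. A hedged sketch of a trigger-time call; the surrounding function and the tag are invented, while the ZFCP_ERP_ACTION_* values are the driver's existing action codes:

static void example_trigger_port(struct zfcp_adapter *adapter,
				 struct zfcp_port *port)
{
	unsigned long flags;

	write_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_dbf_rec_trig("erptrg1", adapter, port, NULL,
			  ZFCP_ERP_ACTION_REOPEN_PORT,
			  ZFCP_ERP_ACTION_REOPEN_PORT);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}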
656 222
223
657/** 224/**
658 * zfcp_dbf_rec_action - trace event showing progress of recovery action 225 * zfcp_dbf_rec_run - trace event related to running recovery
659 * @id2: identifier 226 * @tag: identifier for event
660 * @erp_action: error recovery action struct pointer 227 * @erp: erp_action running
661 */ 228 */
662void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) 229void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
663{ 230{
664 struct zfcp_dbf *dbf = erp_action->adapter->dbf; 231 struct zfcp_dbf *dbf = erp->adapter->dbf;
665 struct zfcp_dbf_rec_record *r = &dbf->rec_buf; 232 struct zfcp_dbf_rec *rec = &dbf->rec_buf;
666 unsigned long flags; 233 unsigned long flags;
667 234
668 spin_lock_irqsave(&dbf->rec_lock, flags); 235 spin_lock_irqsave(&dbf->rec_lock, flags);
669 memset(r, 0, sizeof(*r)); 236 memset(rec, 0, sizeof(*rec));
670 r->id = ZFCP_REC_DBF_ID_ACTION;
671 memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
672 r->u.action.action = (unsigned long)erp_action;
673 r->u.action.status = erp_action->status;
674 r->u.action.step = erp_action->step;
675 r->u.action.fsf_req = erp_action->fsf_req_id;
676 debug_event(dbf->rec, 5, r, sizeof(*r));
677 spin_unlock_irqrestore(&dbf->rec_lock, flags);
678}
679 237
680/** 238 rec->id = ZFCP_DBF_REC_RUN;
681 * zfcp_dbf_san_ct_request - trace event for issued CT request 239 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
682 * @fsf_req: request containing issued CT data 240 zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);
683 * @d_id: destination id where ct request is sent to
684 */
685void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id)
686{
687 struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
688 struct zfcp_adapter *adapter = fsf_req->adapter;
689 struct zfcp_dbf *dbf = adapter->dbf;
690 struct fc_ct_hdr *hdr = sg_virt(ct->req);
691 struct zfcp_dbf_san_record *r = &dbf->san_buf;
692 struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
693 int level = 3;
694 unsigned long flags;
695 241
696 spin_lock_irqsave(&dbf->san_lock, flags); 242 rec->u.run.fsf_req_id = erp->fsf_req_id;
697 memset(r, 0, sizeof(*r)); 243 rec->u.run.rec_status = erp->status;
698 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); 244 rec->u.run.rec_step = erp->step;
699 r->fsf_reqid = fsf_req->req_id; 245 rec->u.run.rec_action = erp->action;
700 r->fsf_seqno = fsf_req->seq_no;
701 oct->d_id = d_id;
702 oct->cmd_req_code = hdr->ct_cmd;
703 oct->revision = hdr->ct_rev;
704 oct->gs_type = hdr->ct_fs_type;
705 oct->gs_subtype = hdr->ct_fs_subtype;
706 oct->options = hdr->ct_options;
707 oct->max_res_size = hdr->ct_mr_size;
708 oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr),
709 ZFCP_DBF_SAN_MAX_PAYLOAD);
710 debug_event(dbf->san, level, r, sizeof(*r));
711 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
712 (void *)hdr + sizeof(struct fc_ct_hdr), oct->len);
713 spin_unlock_irqrestore(&dbf->san_lock, flags);
714}
715 246
716/** 247 if (erp->sdev)
717 * zfcp_dbf_san_ct_response - trace event for completion of CT request 248 rec->u.run.rec_count =
718 * @fsf_req: request containing CT response 249 atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
719 */ 250 else if (erp->port)
720void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) 251 rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
721{ 252 else
722 struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data; 253 rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
723 struct zfcp_adapter *adapter = fsf_req->adapter;
724 struct fc_ct_hdr *hdr = sg_virt(ct->resp);
725 struct zfcp_dbf *dbf = adapter->dbf;
726 struct zfcp_dbf_san_record *r = &dbf->san_buf;
727 struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
728 int level = 3;
729 unsigned long flags;
730 254
731 spin_lock_irqsave(&dbf->san_lock, flags); 255 debug_event(dbf->rec, 1, rec, sizeof(*rec));
732 memset(r, 0, sizeof(*r)); 256 spin_unlock_irqrestore(&dbf->rec_lock, flags);
733 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
734 r->fsf_reqid = fsf_req->req_id;
735 r->fsf_seqno = fsf_req->seq_no;
736 rct->cmd_rsp_code = hdr->ct_cmd;
737 rct->revision = hdr->ct_rev;
738 rct->reason_code = hdr->ct_reason;
739 rct->expl = hdr->ct_explan;
740 rct->vendor_unique = hdr->ct_vendor;
741 rct->max_res_size = hdr->ct_mr_size;
742 rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr),
743 ZFCP_DBF_SAN_MAX_PAYLOAD);
744 debug_event(dbf->san, level, r, sizeof(*r));
745 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
746 (void *)hdr + sizeof(struct fc_ct_hdr), rct->len);
747 spin_unlock_irqrestore(&dbf->san_lock, flags);
748} 257}
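Editor's note: zfcp_dbf_rec_run() picks the erp counter from the most specific object bound to the action (LUN, then port, then adapter), so one call traces any action type. A sketch of bracketing a strategy step with it; the function body and tags are illustrative:

static void example_strategy_step(struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run("erstrt1", erp);	/* entering the step */
	/* ... actual recovery work would happen here ... */
	zfcp_dbf_rec_run("erfini1", erp);	/* step finished */
}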
749 258
750static void zfcp_dbf_san_els(const char *tag, int level, 259static inline
751 struct zfcp_fsf_req *fsf_req, u32 d_id, 260void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
752 void *buffer, int buflen) 261 u64 req_id, u32 d_id)
753{ 262{
754 struct zfcp_adapter *adapter = fsf_req->adapter; 263 struct zfcp_dbf_san *rec = &dbf->san_buf;
755 struct zfcp_dbf *dbf = adapter->dbf; 264 u16 rec_len;
756 struct zfcp_dbf_san_record *rec = &dbf->san_buf;
757 unsigned long flags; 265 unsigned long flags;
758 266
759 spin_lock_irqsave(&dbf->san_lock, flags); 267 spin_lock_irqsave(&dbf->san_lock, flags);
760 memset(rec, 0, sizeof(*rec)); 268 memset(rec, 0, sizeof(*rec));
761 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 269
762 rec->fsf_reqid = fsf_req->req_id; 270 rec->id = id;
763 rec->fsf_seqno = fsf_req->seq_no; 271 rec->fsf_req_id = req_id;
764 rec->u.els.d_id = d_id; 272 rec->d_id = d_id;
765 debug_event(dbf->san, level, rec, sizeof(*rec)); 273 rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
766 zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, 274 memcpy(rec->payload, data, rec_len);
767 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); 275 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
276
277 debug_event(dbf->san, 1, rec, sizeof(*rec));
768 spin_unlock_irqrestore(&dbf->san_lock, flags); 278 spin_unlock_irqrestore(&dbf->san_lock, flags);
769} 279}
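Editor's note: all three SAN trace events funnel through this helper, and payloads longer than ZFCP_DBF_SAN_MAX_PAYLOAD are silently truncated in the record. A hedged sketch of how a further wrapper would use it, reusing ZFCP_DBF_SAN_REQ purely for illustration:

static void example_san_trace(struct zfcp_fsf_req *fsf, void *data, u16 len)
{
	zfcp_dbf_san("santst1", fsf->adapter->dbf, data,
		     ZFCP_DBF_SAN_REQ, len, fsf->req_id, 0);
}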
770 280
771/** 281/**
772 * zfcp_dbf_san_els_request - trace event for issued ELS 282 * zfcp_dbf_san_req - trace event for issued SAN request
773 * @fsf_req: request containing issued ELS 283 * @tag: identifier for event
284 * @fsf: request containing issued CT or ELS data
285 * @d_id: destination ID
774 */ 286 */
775void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) 287void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
776{ 288{
777 struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; 289 struct zfcp_dbf *dbf = fsf->adapter->dbf;
778 u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); 290 struct zfcp_fsf_ct_els *ct_els = fsf->data;
291 u16 length;
779 292
780 zfcp_dbf_san_els("oels", 2, fsf_req, d_id, 293 length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
781 sg_virt(els->req), els->req->length); 294 zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
295 fsf->req_id, d_id);
782} 296}
783 297
784/** 298/**
785 * zfcp_dbf_san_els_response - trace event for completed ELS 299 * zfcp_dbf_san_res - trace event for received SAN response
786 * @fsf_req: request containing ELS response 300 * @tag: identifier for event
301 * @fsf: request containing the received CT or ELS data
787 */ 302 */
788void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) 303void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
789{ 304{
790 struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; 305 struct zfcp_dbf *dbf = fsf->adapter->dbf;
791 u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); 306 struct zfcp_fsf_ct_els *ct_els = fsf->data;
307 u16 length;
792 308
793 zfcp_dbf_san_els("rels", 2, fsf_req, d_id, 309 length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
794 sg_virt(els->resp), els->resp->length); 310 zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
311 fsf->req_id, 0);
795} 312}
796 313
797/** 314/**
798 * zfcp_dbf_san_incoming_els - trace event for incoming ELS 315 * zfcp_dbf_san_in_els - trace event for incoming ELS
799 * @fsf_req: request containing unsolicited status buffer with incoming ELS 316 * @tag: identifier for event
317 * @fsf: request containing the unsolicited status buffer with the incoming ELS
800 */ 318 */
801void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) 319void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
802{ 320{
803 struct fsf_status_read_buffer *buf = 321 struct zfcp_dbf *dbf = fsf->adapter->dbf;
804 (struct fsf_status_read_buffer *)fsf_req->data; 322 struct fsf_status_read_buffer *srb =
805 int length = (int)buf->length - 323 (struct fsf_status_read_buffer *) fsf->data;
806 (int)((void *)&buf->payload - (void *)buf); 324 u16 length;
807 325
808 zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id), 326 length = (u16)(srb->length -
809 (void *)buf->payload.data, length); 327 offsetof(struct fsf_status_read_buffer, payload));
810} 328 zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
811 329 fsf->req_id, ntoh24(srb->d_id));
812static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
813 char *out_buf, const char *in_buf)
814{
815 struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf;
816 char *p = out_buf;
817
818 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
819 return 0;
820
821 zfcp_dbf_tag(&p, "tag", r->tag);
822 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
823 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
824
825 if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
826 struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
827 zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id);
828 zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
829 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
830 zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
831 zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype);
832 zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
833 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
834 } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
835 struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp;
836 zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
837 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
838 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
839 zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
840 zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
841 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
842 } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
843 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
844 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
845 struct zfcp_dbf_san_record_els *els = &r->u.els;
846 zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id);
847 }
848 return p - out_buf;
849} 330}
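Editor's note: the new length computation replaces the old pointer arithmetic with offsetof(), which reads more clearly and avoids the void-pointer casts. Spelled out as an illustrative fragment, with srb as in the function above:

/* srb->length covers the whole buffer; subtracting the offset of
 * the payload member leaves exactly the ELS payload length */
u16 pl = srb->length - offsetof(struct fsf_status_read_buffer, payload);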
850 331
851static struct debug_view zfcp_dbf_san_view = { 332/**
852 .name = "structured", 333 * zfcp_dbf_scsi - trace event for scsi commands
853 .header_proc = zfcp_dbf_view_header, 334 * @tag: identifier for event
854 .format_proc = zfcp_dbf_san_view_format, 335 * @sc: pointer to struct scsi_cmnd
855}; 336 * @fsf: pointer to struct zfcp_fsf_req
856 337 */
857void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, 338void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
858 struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd,
859 struct zfcp_fsf_req *fsf_req, unsigned long old_req_id)
860{ 339{
861 struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; 340 struct zfcp_adapter *adapter =
862 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 341 (struct zfcp_adapter *) sc->device->host->hostdata[0];
863 unsigned long flags; 342 struct zfcp_dbf *dbf = adapter->dbf;
343 struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
864 struct fcp_resp_with_ext *fcp_rsp; 344 struct fcp_resp_with_ext *fcp_rsp;
865 struct fcp_resp_rsp_info *fcp_rsp_info = NULL; 345 struct fcp_resp_rsp_info *fcp_rsp_info;
866 char *fcp_sns_info = NULL; 346 unsigned long flags;
867 int offset = 0, buflen = 0;
868 347
869 spin_lock_irqsave(&dbf->scsi_lock, flags); 348 spin_lock_irqsave(&dbf->scsi_lock, flags);
870 do { 349 memset(rec, 0, sizeof(*rec));
871 memset(rec, 0, sizeof(*rec));
872 if (offset == 0) {
873 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
874 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
875 if (scsi_cmnd != NULL) {
876 if (scsi_cmnd->device) {
877 rec->scsi_id = scsi_cmnd->device->id;
878 rec->scsi_lun = scsi_cmnd->device->lun;
879 }
880 rec->scsi_result = scsi_cmnd->result;
881 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
882 rec->scsi_serial = scsi_cmnd->serial_number;
883 memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
884 min((int)scsi_cmnd->cmd_len,
885 ZFCP_DBF_SCSI_OPCODE));
886 rec->scsi_retries = scsi_cmnd->retries;
887 rec->scsi_allowed = scsi_cmnd->allowed;
888 }
889 if (fsf_req != NULL) {
890 fcp_rsp = (struct fcp_resp_with_ext *)
891 &(fsf_req->qtcb->bottom.io.fcp_rsp);
892 fcp_rsp_info = (struct fcp_resp_rsp_info *)
893 &fcp_rsp[1];
894 fcp_sns_info = (char *) &fcp_rsp[1];
895 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
896 fcp_sns_info += fcp_rsp->ext.fr_sns_len;
897
898 rec->rsp_validity = fcp_rsp->resp.fr_flags;
899 rec->rsp_scsi_status = fcp_rsp->resp.fr_status;
900 rec->rsp_resid = fcp_rsp->ext.fr_resid;
901 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
902 rec->rsp_code = fcp_rsp_info->rsp_code;
903 if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
904 buflen = min(fcp_rsp->ext.fr_sns_len,
905 (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
906 rec->sns_info_len = buflen;
907 memcpy(rec->sns_info, fcp_sns_info,
908 min(buflen,
909 ZFCP_DBF_SCSI_FCP_SNS_INFO));
910 offset += min(buflen,
911 ZFCP_DBF_SCSI_FCP_SNS_INFO);
912 }
913
914 rec->fsf_reqid = fsf_req->req_id;
915 rec->fsf_seqno = fsf_req->seq_no;
916 rec->fsf_issued = fsf_req->issued;
917 }
918 rec->old_fsf_reqid = old_req_id;
919 } else {
920 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
921 dump->total_size = buflen;
922 dump->offset = offset;
923 dump->size = min(buflen - offset,
924 (int)sizeof(struct
925 zfcp_dbf_scsi_record) -
926 (int)sizeof(struct zfcp_dbf_dump));
927 memcpy(dump->data, fcp_sns_info + offset, dump->size);
928 offset += dump->size;
929 }
930 debug_event(dbf->scsi, level, rec, sizeof(*rec));
931 } while (offset < buflen);
932 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
933}
934 350
935static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, 351 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
936 char *out_buf, const char *in_buf) 352 rec->id = ZFCP_DBF_SCSI_CMND;
937{ 353 rec->scsi_result = sc->result;
938 struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf; 354 rec->scsi_retries = sc->retries;
939 struct timespec t; 355 rec->scsi_allowed = sc->allowed;
940 char *p = out_buf; 356 rec->scsi_id = sc->device->id;
941 357 rec->scsi_lun = sc->device->lun;
942 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) 358 rec->host_scribble = (unsigned long)sc->host_scribble;
943 return 0; 359
944 360 memcpy(rec->scsi_opcode, sc->cmnd,
945 zfcp_dbf_tag(&p, "tag", r->tag); 361 min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
946 zfcp_dbf_tag(&p, "tag2", r->tag2); 362
947 zfcp_dbf_out(&p, "scsi_id", "0x%08x", r->scsi_id); 363 if (fsf) {
948 zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); 364 rec->fsf_req_id = fsf->req_id;
949 zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); 365 fcp_rsp = (struct fcp_resp_with_ext *)
950 zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); 366 &(fsf->qtcb->bottom.io.fcp_rsp);
951 zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial); 367 memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
952 zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, 368 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
953 0, ZFCP_DBF_SCSI_OPCODE); 369 fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
954 zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); 370 rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
955 zfcp_dbf_out(&p, "scsi_allowed", "0x%02x", r->scsi_allowed); 371 }
956 if (strncmp(r->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) 372 if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
957 zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); 373 rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
958 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); 374 (u16)ZFCP_DBF_PAY_MAX_REC);
959 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); 375 zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
960 stck_to_timespec(r->fsf_issued, &t); 376 "fcp_sns", fsf->req_id);
961 zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); 377 }
962
963 if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
964 zfcp_dbf_out(&p, "fcp_rsp_validity", "0x%02x", r->rsp_validity);
965 zfcp_dbf_out(&p, "fcp_rsp_scsi_status", "0x%02x",
966 r->rsp_scsi_status);
967 zfcp_dbf_out(&p, "fcp_rsp_resid", "0x%08x", r->rsp_resid);
968 zfcp_dbf_out(&p, "fcp_rsp_code", "0x%08x", r->rsp_code);
969 zfcp_dbf_out(&p, "fcp_sns_info_len", "0x%08x", r->sns_info_len);
970 zfcp_dbf_outd(&p, "fcp_sns_info", r->sns_info,
971 min((int)r->sns_info_len,
972 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
973 r->sns_info_len);
974 } 378 }
975 p += sprintf(p, "\n");
976 return p - out_buf;
977}
978 379
979static struct debug_view zfcp_dbf_scsi_view = { 380 debug_event(dbf->scsi, 1, rec, sizeof(*rec));
980 .name = "structured", 381 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
981 .header_proc = zfcp_dbf_view_header, 382}
982 .format_proc = zfcp_dbf_scsi_view_format,
983};
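Editor's note: zfcp_dbf_scsi() reaches the adapter through the Scsi_Host private data, so a bare scsi_cmnd suffices even before an FSF request exists; with @fsf NULL the FCP response part is simply skipped. A sketch of tracing a command that failed prior to submission; the tag is invented:

static void example_trace_failed_cmnd(struct scsi_cmnd *sc)
{
	zfcp_dbf_scsi("rsl_fai", sc, NULL);	/* no FSF request yet */
}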
984 383
985static debug_info_t *zfcp_dbf_reg(const char *name, int level, 384static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
986 struct debug_view *view, int size)
987{ 385{
988 struct debug_info *d; 386 struct debug_info *d;
989 387
990 d = debug_register(name, dbfsize, level, size); 388 d = debug_register(name, size, 1, rec_size);
991 if (!d) 389 if (!d)
992 return NULL; 390 return NULL;
993 391
994 debug_register_view(d, &debug_hex_ascii_view); 392 debug_register_view(d, &debug_hex_ascii_view);
995 debug_register_view(d, view); 393 debug_set_level(d, 3);
996 debug_set_level(d, level);
997 394
998 return d; 395 return d;
999} 396}
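Editor's note: zfcp_dbf_reg() wraps the s390 debug feature: debug_register(name, size, 1, rec_size) allocates a single trace area of @size pages holding fixed-size records of @rec_size bytes, attaches the stock hex_ascii view and sets the area level to 3. For instance, with illustrative values:

/* a 4-page area holding fixed-size recovery records */
debug_info_t *demo = zfcp_dbf_reg("zfcp_demo_rec", 4,
				  sizeof(struct zfcp_dbf_rec));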
1000 397
398static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
399{
400 if (!dbf)
401 return;
402
403 debug_unregister(dbf->scsi);
404 debug_unregister(dbf->san);
405 debug_unregister(dbf->hba);
406 debug_unregister(dbf->pay);
407 debug_unregister(dbf->rec);
408 kfree(dbf);
409}
410
1001/** 411/**
1002 * zfcp_dbf_adapter_register - registers debug feature for an adapter 412 * zfcp_dbf_adapter_register - registers debug feature for an adapter
1003 * @adapter: pointer to adapter for which debug features should be registered 413 * @adapter: pointer to adapter for which debug features should be registered
@@ -1005,69 +415,66 @@ static debug_info_t *zfcp_dbf_reg(const char *name, int level,
1005 */ 415 */
1006int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) 416int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
1007{ 417{
1008 char dbf_name[DEBUG_MAX_NAME_LEN]; 418 char name[DEBUG_MAX_NAME_LEN];
1009 struct zfcp_dbf *dbf; 419 struct zfcp_dbf *dbf;
1010 420
1011 dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); 421 dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
1012 if (!dbf) 422 if (!dbf)
1013 return -ENOMEM; 423 return -ENOMEM;
1014 424
1015 dbf->adapter = adapter; 425 spin_lock_init(&dbf->pay_lock);
1016
1017 spin_lock_init(&dbf->hba_lock); 426 spin_lock_init(&dbf->hba_lock);
1018 spin_lock_init(&dbf->san_lock); 427 spin_lock_init(&dbf->san_lock);
1019 spin_lock_init(&dbf->scsi_lock); 428 spin_lock_init(&dbf->scsi_lock);
1020 spin_lock_init(&dbf->rec_lock); 429 spin_lock_init(&dbf->rec_lock);
1021 430
1022 /* debug feature area which records recovery activity */ 431 /* debug feature area which records recovery activity */
1023 sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); 432 sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
1024 dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view, 433 dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
1025 sizeof(struct zfcp_dbf_rec_record));
1026 if (!dbf->rec) 434 if (!dbf->rec)
1027 goto err_out; 435 goto err_out;
1028 436
1029 /* debug feature area which records HBA (FSF and QDIO) conditions */ 437 /* debug feature area which records HBA (FSF and QDIO) conditions */
1030 sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); 438 sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
1031 dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view, 439 dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
1032 sizeof(struct zfcp_dbf_hba_record));
1033 if (!dbf->hba) 440 if (!dbf->hba)
1034 goto err_out; 441 goto err_out;
1035 442
443 /* debug feature area which records payload info */
444 sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
445 dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
446 if (!dbf->pay)
447 goto err_out;
448
1036 /* debug feature area which records SAN command failures and recovery */ 449 /* debug feature area which records SAN command failures and recovery */
1037 sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); 450 sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
1038 dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view, 451 dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
1039 sizeof(struct zfcp_dbf_san_record));
1040 if (!dbf->san) 452 if (!dbf->san)
1041 goto err_out; 453 goto err_out;
1042 454
1043 /* debug feature area which records SCSI command failures and recovery */ 455 /* debug feature area which records SCSI command failures and recovery */
1044 sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); 456 sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
1045 dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view, 457 dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
1046 sizeof(struct zfcp_dbf_scsi_record));
1047 if (!dbf->scsi) 458 if (!dbf->scsi)
1048 goto err_out; 459 goto err_out;
1049 460
1050 adapter->dbf = dbf; 461 adapter->dbf = dbf;
1051 return 0;
1052 462
463 return 0;
1053err_out: 464err_out:
1054 zfcp_dbf_adapter_unregister(dbf); 465 zfcp_dbf_unregister(dbf);
1055 return -ENOMEM; 466 return -ENOMEM;
1056} 467}
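Editor's note: after a successful registration the five areas appear under the s390 debug filesystem, so traces can be read without further driver support. A sketch of the assumed probe-path call, with an example bus ID:

if (zfcp_dbf_adapter_register(adapter))
	goto failed;	/* tracing is mandatory for this driver */
/* afterwards, e.g.:
 *   /sys/kernel/debug/s390dbf/zfcp_0.0.4711_hba/hex_ascii
 *   /sys/kernel/debug/s390dbf/zfcp_0.0.4711_pay/hex_ascii
 */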
1057 468
1058/** 469/**
1059 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter 470 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
1060 * @dbf: pointer to dbf for which debug features should be unregistered 471 * @adapter: pointer to adapter for which debug features should be unregistered
1061 */ 472 */
1062void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) 473void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
1063{ 474{
1064 if (!dbf) 475 struct zfcp_dbf *dbf = adapter->dbf;
1065 return; 476
1066 debug_unregister(dbf->scsi); 477 adapter->dbf = NULL;
1067 debug_unregister(dbf->san); 478 zfcp_dbf_unregister(dbf);
1068 debug_unregister(dbf->hba);
1069 debug_unregister(dbf->rec);
1070 dbf->adapter->dbf = NULL;
1071 kfree(dbf);
1072} 479}
1073 480
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 2bcc3403126a..714f087eb7a9 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -1,22 +1,8 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries. 3 * debug feature declarations
4 * 4 *
5 * Copyright IBM Corp. 2008, 2009 5 * Copyright IBM Corp. 2008, 2010
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 6 */
21 7
22#ifndef ZFCP_DBF_H 8#ifndef ZFCP_DBF_H
@@ -27,339 +13,370 @@
27#include "zfcp_fsf.h" 13#include "zfcp_fsf.h"
28#include "zfcp_def.h" 14#include "zfcp_def.h"
29 15
30#define ZFCP_DBF_TAG_SIZE 4 16#define ZFCP_DBF_TAG_LEN 7
31#define ZFCP_DBF_ID_SIZE 7
32 17
33#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull 18#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
34 19
35struct zfcp_dbf_dump { 20/**
36 u8 tag[ZFCP_DBF_TAG_SIZE]; 21 * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
37 u32 total_size; /* size of total dump data */ 22 * @ready: number of ready recovery actions
38 u32 offset; /* how much data has being already dumped */ 23 * @running: number of running recovery actions
39 u32 size; /* how much data comes with this record */ 24 * @want: wanted recovery action
40 u8 data[]; /* dump data */ 25 * @need: needed recovery action
41} __attribute__ ((packed)); 26 */
42 27struct zfcp_dbf_rec_trigger {
43struct zfcp_dbf_rec_record_thread {
44 u32 total;
45 u32 ready; 28 u32 ready;
46 u32 running; 29 u32 running;
47};
48
49struct zfcp_dbf_rec_record_target {
50 u64 ref;
51 u32 status;
52 u32 d_id;
53 u64 wwpn;
54 u64 fcp_lun;
55 u32 erp_count;
56};
57
58struct zfcp_dbf_rec_record_trigger {
59 u8 want; 30 u8 want;
60 u8 need; 31 u8 need;
61 u32 as; 32} __packed;
62 u32 ps;
63 u32 us;
64 u64 ref;
65 u64 action;
66 u64 wwpn;
67 u64 fcp_lun;
68};
69 33
70struct zfcp_dbf_rec_record_action { 34/**
71 u32 status; 35 * struct zfcp_dbf_rec_running - trace record for running recovery
72 u32 step; 36 * @fsf_req_id: request id for fsf requests
73 u64 action; 37 * @rec_status: status of the recovery action
74 u64 fsf_req; 38 * @rec_step: current step of the recovery action
39 * @rec_count: recovery counter
40 */
41struct zfcp_dbf_rec_running {
42 u64 fsf_req_id;
43 u32 rec_status;
44 u16 rec_step;
45 u8 rec_action;
46 u8 rec_count;
47} __packed;
48
49/**
50 * enum zfcp_dbf_rec_id - recovery trace record id
51 * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
52 * @ZFCP_DBF_REC_RUN: running recovery identifier
53 */
54enum zfcp_dbf_rec_id {
55 ZFCP_DBF_REC_TRIG = 1,
56 ZFCP_DBF_REC_RUN = 2,
75}; 57};
76 58
77struct zfcp_dbf_rec_record { 59/**
60 * struct zfcp_dbf_rec - trace record for error recovery actions
61 * @id: unique number of recovery record type
62 * @tag: identifier string specifying the location of initiation
63 * @lun: logical unit number
64 * @wwpn: world wide port number
65 * @d_id: destination ID
66 * @adapter_status: current status of the adapter
67 * @port_status: current status of the port
68 * @lun_status: current status of the lun
69 * @u.trig: structure zfcp_dbf_rec_trigger
70 * @u.run: structure zfcp_dbf_rec_running
71 */
72struct zfcp_dbf_rec {
78 u8 id; 73 u8 id;
79 char id2[7]; 74 char tag[ZFCP_DBF_TAG_LEN];
75 u64 lun;
76 u64 wwpn;
77 u32 d_id;
78 u32 adapter_status;
79 u32 port_status;
80 u32 lun_status;
80 union { 81 union {
81 struct zfcp_dbf_rec_record_action action; 82 struct zfcp_dbf_rec_trigger trig;
82 struct zfcp_dbf_rec_record_thread thread; 83 struct zfcp_dbf_rec_running run;
83 struct zfcp_dbf_rec_record_target target;
84 struct zfcp_dbf_rec_record_trigger trigger;
85 } u; 84 } u;
86}; 85} __packed;
87 86
88enum { 87/**
89 ZFCP_REC_DBF_ID_ACTION, 88 * enum zfcp_dbf_san_id - SAN trace record identifier
90 ZFCP_REC_DBF_ID_THREAD, 89 * @ZFCP_DBF_SAN_REQ: request trace record id
91 ZFCP_REC_DBF_ID_TARGET, 90 * @ZFCP_DBF_SAN_RES: response trace record id
92 ZFCP_REC_DBF_ID_TRIGGER, 91 * @ZFCP_DBF_SAN_ELS: extended link service record id
92 */
93enum zfcp_dbf_san_id {
94 ZFCP_DBF_SAN_REQ = 1,
95 ZFCP_DBF_SAN_RES = 2,
96 ZFCP_DBF_SAN_ELS = 3,
93}; 97};
94 98
95struct zfcp_dbf_hba_record_response { 99/** struct zfcp_dbf_san - trace record for SAN requests and responses
96 u32 fsf_command; 100 * @id: unique number of SAN record type
97 u64 fsf_reqid; 101 * @tag: identifier string specifying the location of initiation
98 u32 fsf_seqno; 102 * @fsf_req_id: request id for fsf requests
99 u64 fsf_issued; 103 * @payload: unformatted information related to request/response
100 u32 fsf_prot_status; 104 * @d_id: destination id
105 */
106struct zfcp_dbf_san {
107 u8 id;
108 char tag[ZFCP_DBF_TAG_LEN];
109 u64 fsf_req_id;
110 u32 d_id;
111#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
112 char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
113} __packed;
114
115/**
116 * struct zfcp_dbf_hba_res - trace record for hba responses
117 * @req_issued: timestamp when request was issued
118 * @prot_status: protocol status
119 * @prot_status_qual: protocol status qualifier
120 * @fsf_status: fsf status
121 * @fsf_status_qual: fsf status qualifier
122 */
123struct zfcp_dbf_hba_res {
124 u64 req_issued;
125 u32 prot_status;
126 u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
101 u32 fsf_status; 127 u32 fsf_status;
102 u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; 128 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
103 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; 129} __packed;
104 u32 fsf_req_status;
105 u8 sbal_first;
106 u8 sbal_last;
107 u8 sbal_response;
108 u8 pool;
109 u64 erp_action;
110 union {
111 struct {
112 u64 cmnd;
113 u64 serial;
114 u32 data_dir;
115 } fcp;
116 struct {
117 u64 wwpn;
118 u32 d_id;
119 u32 port_handle;
120 } port;
121 struct {
122 u64 wwpn;
123 u64 fcp_lun;
124 u32 port_handle;
125 u32 lun_handle;
126 } unit;
127 struct {
128 u32 d_id;
129 } els;
130 } u;
131} __attribute__ ((packed));
132 130
133struct zfcp_dbf_hba_record_status { 131/**
134 u8 failed; 132 * struct zfcp_dbf_hba_uss - trace record for unsolicited status
133 * @status_type: type of unsolicited status
134 * @status_subtype: subtype of unsolicited status
135 * @d_id: destination ID
136 * @lun: logical unit number
137 * @queue_designator: queue designator
138 */
139struct zfcp_dbf_hba_uss {
135 u32 status_type; 140 u32 status_type;
136 u32 status_subtype; 141 u32 status_subtype;
137 struct fsf_queue_designator
138 queue_designator;
139 u32 payload_size;
140#define ZFCP_DBF_UNSOL_PAYLOAD 80
141#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32
142#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56
143#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT (2 * sizeof(u32))
144 u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
145} __attribute__ ((packed));
146
147struct zfcp_dbf_hba_record_qdio {
148 u32 qdio_error;
149 u8 sbal_index;
150 u8 sbal_count;
151} __attribute__ ((packed));
152
153struct zfcp_dbf_hba_record {
154 u8 tag[ZFCP_DBF_TAG_SIZE];
155 u8 tag2[ZFCP_DBF_TAG_SIZE];
156 union {
157 struct zfcp_dbf_hba_record_response response;
158 struct zfcp_dbf_hba_record_status status;
159 struct zfcp_dbf_hba_record_qdio qdio;
160 struct fsf_bit_error_payload berr;
161 } u;
162} __attribute__ ((packed));
163
164struct zfcp_dbf_san_record_ct_request {
165 u16 cmd_req_code;
166 u8 revision;
167 u8 gs_type;
168 u8 gs_subtype;
169 u8 options;
170 u16 max_res_size;
171 u32 len;
172 u32 d_id;
173} __attribute__ ((packed));
174
175struct zfcp_dbf_san_record_ct_response {
176 u16 cmd_rsp_code;
177 u8 revision;
178 u8 reason_code;
179 u8 expl;
180 u8 vendor_unique;
181 u16 max_res_size;
182 u32 len;
183} __attribute__ ((packed));
184
185struct zfcp_dbf_san_record_els {
186 u32 d_id; 142 u32 d_id;
187} __attribute__ ((packed)); 143 u64 lun;
144 u64 queue_designator;
145} __packed;
146
147/**
148 * enum zfcp_dbf_hba_id - HBA trace record identifier
149 * @ZFCP_DBF_HBA_RES: response trace record
150 * @ZFCP_DBF_HBA_USS: unsolicited status trace record
151 * @ZFCP_DBF_HBA_BIT: bit error trace record
152 */
153enum zfcp_dbf_hba_id {
154 ZFCP_DBF_HBA_RES = 1,
155 ZFCP_DBF_HBA_USS = 2,
156 ZFCP_DBF_HBA_BIT = 3,
157};
188 158
189struct zfcp_dbf_san_record { 159/**
190 u8 tag[ZFCP_DBF_TAG_SIZE]; 160 * struct zfcp_dbf_hba - common trace record for HBA records
191 u64 fsf_reqid; 161 * @id: unique number of HBA record type
192 u32 fsf_seqno; 162 * @tag: identifier string specifying the location of initiation
163 * @fsf_req_id: request id for fsf requests
164 * @fsf_req_status: status of fsf request
165 * @fsf_cmd: fsf command
166 * @fsf_seq_no: fsf sequence number
167 * @pl_len: length of payload stored as zfcp_dbf_pay
168 * @u: record type specific data
169 */
170struct zfcp_dbf_hba {
171 u8 id;
172 char tag[ZFCP_DBF_TAG_LEN];
173 u64 fsf_req_id;
174 u32 fsf_req_status;
175 u32 fsf_cmd;
176 u32 fsf_seq_no;
177 u16 pl_len;
193 union { 178 union {
194 struct zfcp_dbf_san_record_ct_request ct_req; 179 struct zfcp_dbf_hba_res res;
195 struct zfcp_dbf_san_record_ct_response ct_resp; 180 struct zfcp_dbf_hba_uss uss;
196 struct zfcp_dbf_san_record_els els; 181 struct fsf_bit_error_payload be;
197 } u; 182 } u;
198} __attribute__ ((packed)); 183} __packed;
199 184
200#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 185/**
186 * enum zfcp_dbf_scsi_id - scsi trace record identifier
187 * @ZFCP_DBF_SCSI_CMND: scsi command trace record
188 */
189enum zfcp_dbf_scsi_id {
190 ZFCP_DBF_SCSI_CMND = 1,
191};
201 192
202struct zfcp_dbf_scsi_record { 193/**
203 u8 tag[ZFCP_DBF_TAG_SIZE]; 194 * struct zfcp_dbf_scsi - common trace record for SCSI records
204 u8 tag2[ZFCP_DBF_TAG_SIZE]; 195 * @id: unique number of SCSI record type
196 * @tag: identifier string specifying the location of initiation
197 * @scsi_id: scsi device id
198 * @scsi_lun: scsi device logical unit number
199 * @scsi_result: scsi result
200 * @scsi_retries: current retry number of scsi request
201 * @scsi_allowed: allowed retries
202 * @fcp_rsp_info: FCP response info
203 * @scsi_opcode: scsi opcode
204 * @fsf_req_id: request id of fsf request
205 * @host_scribble: LLD specific data attached to SCSI request
206 * @pl_len: length of payload stored as zfcp_dbf_pay
207 * @fcp_rsp: FCP response for this request, including the extension
208 */
209struct zfcp_dbf_scsi {
210 u8 id;
211 char tag[ZFCP_DBF_TAG_LEN];
205 u32 scsi_id; 212 u32 scsi_id;
206 u32 scsi_lun; 213 u32 scsi_lun;
207 u32 scsi_result; 214 u32 scsi_result;
208 u64 scsi_cmnd;
209 u64 scsi_serial;
210#define ZFCP_DBF_SCSI_OPCODE 16
211 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
212 u8 scsi_retries; 215 u8 scsi_retries;
213 u8 scsi_allowed; 216 u8 scsi_allowed;
214 u64 fsf_reqid; 217 u8 fcp_rsp_info;
215 u32 fsf_seqno; 218#define ZFCP_DBF_SCSI_OPCODE 16
216 u64 fsf_issued; 219 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
217 u64 old_fsf_reqid; 220 u64 fsf_req_id;
218 u8 rsp_validity; 221 u64 host_scribble;
219 u8 rsp_scsi_status; 222 u16 pl_len;
220 u32 rsp_resid; 223 struct fcp_resp_with_ext fcp_rsp;
221 u8 rsp_code; 224} __packed;
222#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16 225
223#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256 226/**
224 u32 sns_info_len; 227 * struct zfcp_dbf_pay - trace record for unformatted payload information
225 u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; 228 * @area: area this record originated from
226} __attribute__ ((packed)); 229 * @counter: ascending record number
230 * @fsf_req_id: request id of fsf request
231 * @data: unformatted data
232 */
233struct zfcp_dbf_pay {
234 u8 counter;
235 char area[ZFCP_DBF_TAG_LEN];
236 u64 fsf_req_id;
237#define ZFCP_DBF_PAY_MAX_REC 0x100
238 char data[ZFCP_DBF_PAY_MAX_REC];
239} __packed;
227 240
241/**
242 * struct zfcp_dbf - main dbf trace structure
243 * @pay: reference to payload trace area
244 * @rec: reference to recovery trace area
245 * @hba: reference to hba trace area
246 * @san: reference to san trace area
247 * @scsi: reference to scsi trace area
248 * @pay_lock: lock protecting payload trace buffer
249 * @rec_lock: lock protecting recovery trace buffer
250 * @hba_lock: lock protecting hba trace buffer
251 * @san_lock: lock protecting san trace buffer
252 * @scsi_lock: lock protecting scsi trace buffer
253 * @pay_buf: pre-allocated buffer for payload
254 * @rec_buf: pre-allocated buffer for recovery
255 * @hba_buf: pre-allocated buffer for hba
256 * @san_buf: pre-allocated buffer for san
257 * @scsi_buf: pre-allocated buffer for scsi
258 */
228struct zfcp_dbf { 259struct zfcp_dbf {
260 debug_info_t *pay;
229 debug_info_t *rec; 261 debug_info_t *rec;
230 debug_info_t *hba; 262 debug_info_t *hba;
231 debug_info_t *san; 263 debug_info_t *san;
232 debug_info_t *scsi; 264 debug_info_t *scsi;
265 spinlock_t pay_lock;
233 spinlock_t rec_lock; 266 spinlock_t rec_lock;
234 spinlock_t hba_lock; 267 spinlock_t hba_lock;
235 spinlock_t san_lock; 268 spinlock_t san_lock;
236 spinlock_t scsi_lock; 269 spinlock_t scsi_lock;
237 struct zfcp_dbf_rec_record rec_buf; 270 struct zfcp_dbf_pay pay_buf;
238 struct zfcp_dbf_hba_record hba_buf; 271 struct zfcp_dbf_rec rec_buf;
239 struct zfcp_dbf_san_record san_buf; 272 struct zfcp_dbf_hba hba_buf;
240 struct zfcp_dbf_scsi_record scsi_buf; 273 struct zfcp_dbf_san san_buf;
241 struct zfcp_adapter *adapter; 274 struct zfcp_dbf_scsi scsi_buf;
242}; 275};
243 276
244static inline 277static inline
245void zfcp_dbf_hba_fsf_resp(const char *tag2, int level, 278void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
246 struct zfcp_fsf_req *req, struct zfcp_dbf *dbf)
247{ 279{
248 if (level <= dbf->hba->level) 280 if (level <= req->adapter->dbf->hba->level)
249 _zfcp_dbf_hba_fsf_response(tag2, level, req, dbf); 281 zfcp_dbf_hba_fsf_res(tag, req);
250} 282}
251 283
252/** 284/**
253 * zfcp_dbf_hba_fsf_response - trace event for request completion 285 * zfcp_dbf_hba_fsf_response - trace event for request completion
254 * @fsf_req: request that has been completed 286 * @req: request that has been completed
255 */ 287 */
256static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) 288static inline
289void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
257{ 290{
258 struct zfcp_dbf *dbf = req->adapter->dbf;
259 struct fsf_qtcb *qtcb = req->qtcb; 291 struct fsf_qtcb *qtcb = req->qtcb;
260 292
261 if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && 293 if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
262 (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { 294 (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
263 zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf); 295 zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
264 296
265 } else if (qtcb->header.fsf_status != FSF_GOOD) { 297 } else if (qtcb->header.fsf_status != FSF_GOOD) {
266 zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf); 298 zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
267 299
268 } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || 300 } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
269 (req->fsf_command == FSF_QTCB_OPEN_LUN)) { 301 (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
270 zfcp_dbf_hba_fsf_resp("open", 4, req, dbf); 302 zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
271 303
272 } else if (qtcb->header.log_length) { 304 } else if (qtcb->header.log_length) {
273 zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf); 305 zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
274 306
275 } else { 307 } else {
276 zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf); 308 zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
277 } 309 }
278 }
279
280/**
281 * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer
282 * @tag: tag indicating which kind of unsolicited status has been received
283 * @dbf: reference to dbf structure
284 * @status_buffer: buffer containing payload of unsolicited status
285 */
286static inline
287void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf,
288 struct fsf_status_read_buffer *buf)
289{
290 int level = 2;
291
292 if (level <= dbf->hba->level)
293 _zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf);
294} 310}
295 311
296static inline 312static inline
297void zfcp_dbf_scsi(const char *tag, const char *tag2, int level, 313void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
298 struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, 314 struct zfcp_fsf_req *req)
299 struct zfcp_fsf_req *req, unsigned long old_id)
300{ 315{
301 if (level <= dbf->scsi->level) 316 struct zfcp_adapter *adapter = (struct zfcp_adapter *)
302 _zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id); 317 scmd->device->host->hostdata[0];
318
319 if (level <= adapter->dbf->scsi->level)
320 zfcp_dbf_scsi(tag, scmd, req);
303} 321}
304 322
305/** 323/**
306 * zfcp_dbf_scsi_result - trace event for SCSI command completion 324 * zfcp_dbf_scsi_result - trace event for SCSI command completion
307 * @dbf: adapter dbf trace
308 * @scmd: SCSI command pointer 325 * @scmd: SCSI command pointer
309 * @req: FSF request used to issue SCSI command 326 * @req: FSF request used to issue SCSI command
310 */ 327 */
311static inline 328static inline
312void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, 329void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
313 struct zfcp_fsf_req *req)
314{ 330{
315 if (scmd->result != 0) 331 if (scmd->result != 0)
316 zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0); 332 _zfcp_dbf_scsi("rsl_err", 3, scmd, req);
317 else if (scmd->retries > 0) 333 else if (scmd->retries > 0)
318 zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0); 334 _zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
319 else 335 else
320 zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0); 336 _zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
321} 337}
322 338
323/** 339/**
324 * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command 340 * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
325 * @dbf: adapter dbf trace
326 * @scmd: SCSI command pointer 341 * @scmd: SCSI command pointer
327 */ 342 */
328static inline 343static inline
329void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd) 344void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
330{ 345{
331 zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0); 346 _zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
332} 347}
333 348
334/** 349/**
335 * zfcp_dbf_scsi_abort - trace event for SCSI command abort 350 * zfcp_dbf_scsi_abort - trace event for SCSI command abort
336 * @tag: tag indicating success or failure of abort operation 351 * @tag: tag indicating success or failure of abort operation
337 * @adapter: adapter thas has been used to issue SCSI command to be aborted
338 * @scmd: SCSI command to be aborted 352 * @scmd: SCSI command to be aborted
339 * @new_req: request containing abort (might be NULL) 353 * @fsf_req: request containing abort (might be NULL)
340 * @old_id: identifier of request containg SCSI command to be aborted
341 */ 354 */
342static inline 355static inline
343void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, 356void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
344 struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req, 357 struct zfcp_fsf_req *fsf_req)
345 unsigned long old_id)
346{ 358{
347 zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id); 359 _zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
348} 360}
349 361
350/** 362/**
351 * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset 363 * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
352 * @tag: tag indicating success or failure of reset operation 364 * @tag: tag indicating success or failure of reset operation
365 * @scmnd: SCSI command which caused this error recovery
353 * @flag: indicates type of reset (Target Reset, Logical Unit Reset) 366 * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
354 * @unit: unit that needs reset
355 * @scsi_cmnd: SCSI command which caused this error recovery
356 */ 367 */
357static inline 368static inline
358void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 369void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
359 struct scsi_cmnd *scsi_cmnd)
360{ 370{
361 zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, 371 char tmp_tag[ZFCP_DBF_TAG_LEN];
362 unit->port->adapter->dbf, scsi_cmnd, NULL, 0); 372
373 if (flag == FCP_TMF_TGT_RESET)
374 memcpy(tmp_tag, "tr_", 3);
375 else
376 memcpy(tmp_tag, "lr_", 3);
377
378 memcpy(&tmp_tag[3], tag, 4);
379 _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
363} 380}
364 381
365#endif /* ZFCP_DBF_H */ 382#endif /* ZFCP_DBF_H */
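
A minimal user-space sketch of the tag composition introduced above: zfcp_dbf_scsi_devreset() builds one fixed-width record tag from a 3-byte area prefix ("tr_" or "lr_") plus a 4-character caller tag, instead of the two separate short tags of the old records. The sketch assumes ZFCP_DBF_TAG_LEN is 7 (the 3 + 4 bytes the memcpy calls imply) and uses an illustrative value for FCP_TMF_TGT_RESET rather than the kernel header's definition.

/* Stand-alone illustration; not kernel code. */
#include <stdio.h>
#include <string.h>

#define ZFCP_DBF_TAG_LEN	7	/* assumed: 3-byte prefix + 4-byte tag */
#define FCP_TMF_TGT_RESET	0x20	/* illustrative value only */

static void build_devreset_tag(char *tmp_tag, const char *tag, unsigned char flag)
{
	/* area prefix: target reset vs. logical unit reset */
	if (flag == FCP_TMF_TGT_RESET)
		memcpy(tmp_tag, "tr_", 3);
	else
		memcpy(tmp_tag, "lr_", 3);
	/* append the caller's 4-character part; the record stores no NUL */
	memcpy(&tmp_tag[3], tag, 4);
}

int main(void)
{
	char tmp_tag[ZFCP_DBF_TAG_LEN];

	build_devreset_tag(tmp_tag, "okay", FCP_TMF_TGT_RESET);
	printf("%.7s\n", tmp_tag);	/* prints "tr_okay" */
	build_devreset_tag(tmp_tag, "fail", 0);
	printf("%.7s\n", tmp_tag);	/* prints "lr_fail" */
	return 0;
}

Because the tag is stored without a terminating NUL, readers of the trace buffer must treat it as a fixed 7-byte field, which the bounded %.7s above mimics.
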
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e1c6b6e05a75..527ba48eea57 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -85,11 +85,10 @@ struct zfcp_reqlist;
 #define ZFCP_STATUS_PORT_LINK_TEST		0x00000002
 
 /* logical unit status */
-#define ZFCP_STATUS_UNIT_SHARED			0x00000004
-#define ZFCP_STATUS_UNIT_READONLY		0x00000008
+#define ZFCP_STATUS_LUN_SHARED			0x00000004
+#define ZFCP_STATUS_LUN_READONLY		0x00000008
 
 /* FSF request status (this does not have a common part) */
-#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT	0x00000002
 #define ZFCP_STATUS_FSFREQ_ERROR		0x00000008
 #define ZFCP_STATUS_FSFREQ_CLEANUP		0x00000010
 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED	0x00000040
@@ -108,7 +107,7 @@ struct zfcp_adapter_mempool {
 	mempool_t *scsi_req;
 	mempool_t *scsi_abort;
 	mempool_t *status_read_req;
-	mempool_t *status_read_data;
+	mempool_t *sr_data;
 	mempool_t *gid_pn;
 	mempool_t *qtcb_pool;
 };
@@ -118,7 +117,7 @@ struct zfcp_erp_action {
 	int action;	              /* requested action code */
 	struct zfcp_adapter *adapter; /* device which should be recovered */
 	struct zfcp_port *port;
-	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
 	u32 status;	              /* recovery status */
 	u32 step;	              /* active step of this erp action */
 	unsigned long fsf_req_id;
@@ -190,6 +189,7 @@ struct zfcp_adapter {
 	struct fsf_qtcb_bottom_port *stats_reset_data;
 	unsigned long		stats_reset;
 	struct work_struct	scan_work;
+	struct work_struct	ns_up_work;
 	struct service_level	service_level;
 	struct workqueue_struct	*work_queue;
 	struct device_dma_parameters dma_parms;
@@ -219,21 +219,66 @@ struct zfcp_port {
 	unsigned int		starget_id;
 };
 
+/**
+ * struct zfcp_unit - LUN configured via zfcp sysfs
+ * @dev: struct device for sysfs representation and reference counting
+ * @list: entry in LUN/unit list per zfcp_port
+ * @port: reference to zfcp_port where this LUN is configured
+ * @fcp_lun: 64 bit LUN value
+ * @scsi_work: for running scsi_scan_target
+ *
+ * This is the representation of a LUN that has been configured for
+ * usage. The main data here is the 64 bit LUN value, data for
+ * running I/O and recovery is in struct zfcp_scsi_dev.
+ */
 struct zfcp_unit {
 	struct device		dev;
-	struct list_head	list;		/* list of logical units */
-	struct zfcp_port	*port;		/* remote port of unit */
-	atomic_t		status;		/* status of this logical unit */
-	u64			fcp_lun;	/* own FCP_LUN */
-	u32			handle;		/* handle assigned by FSF */
-	struct scsi_device	*device;	/* scsi device struct pointer */
-	struct zfcp_erp_action	erp_action;	/* pending error recovery */
-	atomic_t		erp_counter;
-	struct zfcp_latencies	latencies;
+	struct list_head	list;
+	struct zfcp_port	*port;
+	u64			fcp_lun;
 	struct work_struct	scsi_work;
 };
 
 /**
+ * struct zfcp_scsi_dev - zfcp data per SCSI device
+ * @status: zfcp internal status flags
+ * @lun_handle: handle from "open lun" for issuing FSF requests
+ * @erp_action: zfcp erp data for opening and recovering this LUN
+ * @erp_counter: zfcp erp counter for this LUN
+ * @latencies: FSF channel and fabric latencies
+ * @port: zfcp_port where this LUN belongs to
+ */
+struct zfcp_scsi_dev {
+	atomic_t		status;
+	u32			lun_handle;
+	struct zfcp_erp_action	erp_action;
+	atomic_t		erp_counter;
+	struct zfcp_latencies	latencies;
+	struct zfcp_port	*port;
+};
+
+/**
+ * sdev_to_zfcp - Access zfcp LUN data for SCSI device
+ * @sdev: scsi_device where to get the zfcp_scsi_dev pointer
+ */
+static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
+{
+	return scsi_transport_device_data(sdev);
+}
+
+/**
+ * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
+ * @sdev: SCSI device where to get the LUN from
+ */
+static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
+{
+	u64 fcp_lun;
+
+	int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
+	return fcp_lun;
+}
+
+/**
  * struct zfcp_fsf_req - basic FSF request structure
  * @list: list of FSF requests
  * @req_id: unique request ID
@@ -249,7 +294,6 @@ struct zfcp_unit {
  * @erp_action: reference to erp action if request issued on behalf of ERP
  * @pool: reference to memory pool if used for this request
  * @issued: time when request was send (STCK)
- * @unit: reference to unit if this request is a SCSI request
  * @handler: handler which should be called to process response
  */
 struct zfcp_fsf_req {
@@ -267,24 +311,7 @@ struct zfcp_fsf_req {
 	struct zfcp_erp_action *erp_action;
 	mempool_t		*pool;
 	unsigned long long	issued;
-	struct zfcp_unit	*unit;
 	void			(*handler)(struct zfcp_fsf_req *);
 };
 
-/* driver data */
-struct zfcp_data {
-	struct scsi_host_template scsi_host_template;
-	struct scsi_transport_template *scsi_transport_template;
-	struct kmem_cache	*gpn_ft_cache;
-	struct kmem_cache	*qtcb_cache;
-	struct kmem_cache	*sr_buffer_cache;
-	struct kmem_cache	*gid_pn_cache;
-	struct kmem_cache	*adisc_cache;
-};
-
-/********************** ZFCP SPECIFIC DEFINES ********************************/
-
-#define ZFCP_SET                0x00000100
-#define ZFCP_CLEAR              0x00000200
-
 #endif /* ZFCP_DEF_H */
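
The new zfcp_scsi_dev_lun() helper above computes the 64 bit FCP LUN from sdev->lun on demand instead of caching it per unit. A self-contained sketch of that conversion, re-implementing the byte layout of the kernel's int_to_scsilun() for illustration; reinterpreting the 8 bytes as a u64, as the cast in the helper does, matches what zfcp sees on a big-endian host such as s390.

/* Stand-alone illustration; not kernel code. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct scsi_lun {
	uint8_t scsi_lun[8];
};

/* same byte shuffling as the kernel's int_to_scsilun(): two bytes per
 * SAM LUN level, most significant level first */
static void int_to_scsilun(uint64_t lun, struct scsi_lun *scsilun)
{
	size_t i;

	memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
	for (i = 0; i < sizeof(lun); i += 2) {
		scsilun->scsi_lun[i]     = (lun >> 8) & 0xFF;
		scsilun->scsi_lun[i + 1] = lun & 0xFF;
		lun >>= 16;
	}
}

int main(void)
{
	struct scsi_lun fcp_lun;
	size_t i;

	int_to_scsilun(1, &fcp_lun);	/* SCSI midlayer LUN 1 */
	for (i = 0; i < 8; i++)
		printf("%02x", fcp_lun.scsi_lun[i]);
	printf("\n");	/* prints 0001000000000000, the FCP LUN for LUN 1 */
	return 0;
}

The conversion is cheap, so recomputing it per call avoids keeping a second copy of the LUN in zfcp's per-device data.
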
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 160b432c907f..e1b4f800e226 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -21,6 +21,7 @@ enum zfcp_erp_act_flags {
 	ZFCP_STATUS_ERP_DISMISSING	= 0x00100000,
 	ZFCP_STATUS_ERP_DISMISSED	= 0x00200000,
 	ZFCP_STATUS_ERP_LOWMEM		= 0x00400000,
+	ZFCP_STATUS_ERP_NO_REF		= 0x00800000,
 };
 
 enum zfcp_erp_steps {
@@ -29,12 +30,12 @@ enum zfcp_erp_steps {
 	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
 	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
 	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
-	ZFCP_ERP_STEP_UNIT_CLOSING	= 0x1000,
-	ZFCP_ERP_STEP_UNIT_OPENING	= 0x2000,
+	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
+	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
 };
 
 enum zfcp_erp_act_type {
-	ZFCP_ERP_ACTION_REOPEN_UNIT        = 1,
+	ZFCP_ERP_ACTION_REOPEN_LUN         = 1,
 	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
 	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
 	ZFCP_ERP_ACTION_REOPEN_ADAPTER     = 4,
@@ -56,9 +57,8 @@ enum zfcp_erp_act_result {
 
 static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
 {
-	zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL,
-				       ZFCP_STATUS_COMMON_UNBLOCKED | mask,
-				       ZFCP_CLEAR);
+	zfcp_erp_clear_adapter_status(adapter,
+				      ZFCP_STATUS_COMMON_UNBLOCKED | mask);
 }
 
 static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
@@ -76,9 +76,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
 	struct zfcp_adapter *adapter = act->adapter;
 
 	list_move(&act->list, &act->adapter->erp_ready_head);
-	zfcp_dbf_rec_action("erardy1", act);
+	zfcp_dbf_rec_run("erardy1", act);
 	wake_up(&adapter->erp_ready_wq);
-	zfcp_dbf_rec_thread("erardy2", adapter->dbf);
+	zfcp_dbf_rec_run("erardy2", act);
 }
 
 static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -88,24 +88,24 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
 	zfcp_erp_action_ready(act);
 }
 
-static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
+static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
 {
-	if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
-		zfcp_erp_action_dismiss(&unit->erp_action);
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+		zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
 }
 
 static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 {
-	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&port->erp_action);
-	else {
-		read_lock(&port->unit_list_lock);
-		list_for_each_entry(unit, &port->unit_list, list)
-			zfcp_erp_action_dismiss_unit(unit);
-		read_unlock(&port->unit_list_lock);
-	}
+	else
+		shost_for_each_device(sdev, port->adapter->scsi_host)
+			if (sdev_to_zfcp(sdev)->port == port)
+				zfcp_erp_action_dismiss_lun(sdev);
 }
 
 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
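
The dismiss path above shows the pattern this patch applies throughout: with the per-port unit list gone, callers walk every SCSI device of the Scsi_Host and filter on the zfcp_scsi_dev back-pointer to the port. A plain-C sketch of that iterate-and-filter idiom, with stand-in types in place of scsi_device and shost_for_each_device():

/* Stand-alone illustration; not kernel code. */
#include <stdio.h>

struct port { int id; };
struct dev { struct port *port; const char *name; };

static void dismiss_lun(struct dev *d)
{
	printf("dismiss %s\n", d->name);
}

/* equivalent of shost_for_each_device() plus the port comparison */
static void dismiss_port_luns(struct dev *devs, int ndevs, struct port *port)
{
	int i;

	for (i = 0; i < ndevs; i++)
		if (devs[i].port == port)
			dismiss_lun(&devs[i]);
}

int main(void)
{
	struct port a = { 1 }, b = { 2 };
	struct dev devs[] = {
		{ &a, "0:0:0:0" }, { &b, "0:0:1:0" }, { &a, "0:0:0:1" },
	};

	dismiss_port_luns(devs, 3, &a);	/* acts on the two LUNs behind port a */
	return 0;
}

The trade-off is a host-wide walk per recovery trigger in exchange for dropping the unit list and its read/write locking on every LUN add and remove.
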
@@ -124,15 +124,17 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
124 124
125static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, 125static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
126 struct zfcp_port *port, 126 struct zfcp_port *port,
127 struct zfcp_unit *unit) 127 struct scsi_device *sdev)
128{ 128{
129 int need = want; 129 int need = want;
130 int u_status, p_status, a_status; 130 int l_status, p_status, a_status;
131 struct zfcp_scsi_dev *zfcp_sdev;
131 132
132 switch (want) { 133 switch (want) {
133 case ZFCP_ERP_ACTION_REOPEN_UNIT: 134 case ZFCP_ERP_ACTION_REOPEN_LUN:
134 u_status = atomic_read(&unit->status); 135 zfcp_sdev = sdev_to_zfcp(sdev);
135 if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE) 136 l_status = atomic_read(&zfcp_sdev->status);
137 if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
136 return 0; 138 return 0;
137 p_status = atomic_read(&port->status); 139 p_status = atomic_read(&port->status);
138 if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || 140 if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
@@ -154,6 +156,8 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
154 if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) || 156 if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
155 a_status & ZFCP_STATUS_COMMON_ERP_FAILED) 157 a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
156 return 0; 158 return 0;
159 if (p_status & ZFCP_STATUS_COMMON_NOESC)
160 return need;
157 if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED)) 161 if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
158 need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; 162 need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
159 /* fall through */ 163 /* fall through */
@@ -169,22 +173,29 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
169 return need; 173 return need;
170} 174}
171 175
172static struct zfcp_erp_action *zfcp_erp_setup_act(int need, 176static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
173 struct zfcp_adapter *adapter, 177 struct zfcp_adapter *adapter,
174 struct zfcp_port *port, 178 struct zfcp_port *port,
175 struct zfcp_unit *unit) 179 struct scsi_device *sdev)
176{ 180{
177 struct zfcp_erp_action *erp_action; 181 struct zfcp_erp_action *erp_action;
178 u32 status = 0; 182 struct zfcp_scsi_dev *zfcp_sdev;
179 183
180 switch (need) { 184 switch (need) {
181 case ZFCP_ERP_ACTION_REOPEN_UNIT: 185 case ZFCP_ERP_ACTION_REOPEN_LUN:
182 if (!get_device(&unit->dev)) 186 zfcp_sdev = sdev_to_zfcp(sdev);
183 return NULL; 187 if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
184 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); 188 if (scsi_device_get(sdev))
185 erp_action = &unit->erp_action; 189 return NULL;
186 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) 190 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
187 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 191 &zfcp_sdev->status);
192 erp_action = &zfcp_sdev->erp_action;
193 memset(erp_action, 0, sizeof(struct zfcp_erp_action));
194 erp_action->port = port;
195 erp_action->sdev = sdev;
196 if (!(atomic_read(&zfcp_sdev->status) &
197 ZFCP_STATUS_COMMON_RUNNING))
198 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
188 break; 199 break;
189 200
190 case ZFCP_ERP_ACTION_REOPEN_PORT: 201 case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -194,8 +205,10 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
194 zfcp_erp_action_dismiss_port(port); 205 zfcp_erp_action_dismiss_port(port);
195 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 206 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
196 erp_action = &port->erp_action; 207 erp_action = &port->erp_action;
208 memset(erp_action, 0, sizeof(struct zfcp_erp_action));
209 erp_action->port = port;
197 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) 210 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
198 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 211 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
199 break; 212 break;
200 213
201 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 214 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
@@ -203,66 +216,65 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
203 zfcp_erp_action_dismiss_adapter(adapter); 216 zfcp_erp_action_dismiss_adapter(adapter);
204 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 217 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
205 erp_action = &adapter->erp_action; 218 erp_action = &adapter->erp_action;
219 memset(erp_action, 0, sizeof(struct zfcp_erp_action));
206 if (!(atomic_read(&adapter->status) & 220 if (!(atomic_read(&adapter->status) &
207 ZFCP_STATUS_COMMON_RUNNING)) 221 ZFCP_STATUS_COMMON_RUNNING))
208 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 222 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
209 break; 223 break;
210 224
211 default: 225 default:
212 return NULL; 226 return NULL;
213 } 227 }
214 228
215 memset(erp_action, 0, sizeof(struct zfcp_erp_action));
216 erp_action->adapter = adapter; 229 erp_action->adapter = adapter;
217 erp_action->port = port;
218 erp_action->unit = unit;
219 erp_action->action = need; 230 erp_action->action = need;
220 erp_action->status = status; 231 erp_action->status = act_status;
221 232
222 return erp_action; 233 return erp_action;
223} 234}
224 235
225static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, 236static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
226 struct zfcp_port *port, 237 struct zfcp_port *port,
227 struct zfcp_unit *unit, char *id, void *ref) 238 struct scsi_device *sdev,
239 char *id, u32 act_status)
228{ 240{
229 int retval = 1, need; 241 int retval = 1, need;
230 struct zfcp_erp_action *act = NULL; 242 struct zfcp_erp_action *act;
231 243
232 if (!adapter->erp_thread) 244 if (!adapter->erp_thread)
233 return -EIO; 245 return -EIO;
234 246
235 need = zfcp_erp_required_act(want, adapter, port, unit); 247 need = zfcp_erp_required_act(want, adapter, port, sdev);
236 if (!need) 248 if (!need)
237 goto out; 249 goto out;
238 250
239 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 251 act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
240 act = zfcp_erp_setup_act(need, adapter, port, unit);
241 if (!act) 252 if (!act)
242 goto out; 253 goto out;
254 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
243 ++adapter->erp_total_count; 255 ++adapter->erp_total_count;
244 list_add_tail(&act->list, &adapter->erp_ready_head); 256 list_add_tail(&act->list, &adapter->erp_ready_head);
245 wake_up(&adapter->erp_ready_wq); 257 wake_up(&adapter->erp_ready_wq);
246 zfcp_dbf_rec_thread("eracte1", adapter->dbf);
247 retval = 0; 258 retval = 0;
248 out: 259 out:
249 zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); 260 zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
250 return retval; 261 return retval;
251} 262}
252 263
253static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, 264static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
254 int clear_mask, char *id, void *ref) 265 int clear_mask, char *id)
255{ 266{
256 zfcp_erp_adapter_block(adapter, clear_mask); 267 zfcp_erp_adapter_block(adapter, clear_mask);
257 zfcp_scsi_schedule_rports_block(adapter); 268 zfcp_scsi_schedule_rports_block(adapter);
258 269
259 /* ensure propagation of failed status to new devices */ 270 /* ensure propagation of failed status to new devices */
260 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 271 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
261 zfcp_erp_adapter_failed(adapter, "erareo1", NULL); 272 zfcp_erp_set_adapter_status(adapter,
273 ZFCP_STATUS_COMMON_ERP_FAILED);
262 return -EIO; 274 return -EIO;
263 } 275 }
264 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, 276 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
265 adapter, NULL, NULL, id, ref); 277 adapter, NULL, NULL, id, 0);
266} 278}
267 279
268/** 280/**
@@ -270,10 +282,8 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
270 * @adapter: Adapter to reopen. 282 * @adapter: Adapter to reopen.
271 * @clear: Status flags to clear. 283 * @clear: Status flags to clear.
272 * @id: Id for debug trace event. 284 * @id: Id for debug trace event.
273 * @ref: Reference for debug trace event.
274 */ 285 */
275void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, 286void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
276 char *id, void *ref)
277{ 287{
278 unsigned long flags; 288 unsigned long flags;
279 289
@@ -282,10 +292,11 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
282 292
283 write_lock_irqsave(&adapter->erp_lock, flags); 293 write_lock_irqsave(&adapter->erp_lock, flags);
284 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) 294 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
285 zfcp_erp_adapter_failed(adapter, "erareo1", NULL); 295 zfcp_erp_set_adapter_status(adapter,
296 ZFCP_STATUS_COMMON_ERP_FAILED);
286 else 297 else
287 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, 298 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
288 NULL, NULL, id, ref); 299 NULL, NULL, id, 0);
289 write_unlock_irqrestore(&adapter->erp_lock, flags); 300 write_unlock_irqrestore(&adapter->erp_lock, flags);
290} 301}
291 302
@@ -294,13 +305,12 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
294 * @adapter: Adapter to shut down. 305 * @adapter: Adapter to shut down.
295 * @clear: Status flags to clear. 306 * @clear: Status flags to clear.
296 * @id: Id for debug trace event. 307 * @id: Id for debug trace event.
297 * @ref: Reference for debug trace event.
298 */ 308 */
299void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, 309void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
300 char *id, void *ref) 310 char *id)
301{ 311{
302 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 312 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
303 zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref); 313 zfcp_erp_adapter_reopen(adapter, clear | flags, id);
304} 314}
305 315
306/** 316/**
@@ -308,38 +318,21 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
308 * @port: Port to shut down. 318 * @port: Port to shut down.
309 * @clear: Status flags to clear. 319 * @clear: Status flags to clear.
310 * @id: Id for debug trace event. 320 * @id: Id for debug trace event.
311 * @ref: Reference for debug trace event.
312 */
313void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
314 void *ref)
315{
316 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
317 zfcp_erp_port_reopen(port, clear | flags, id, ref);
318}
319
320/**
321 * zfcp_erp_unit_shutdown - Shutdown unit
322 * @unit: Unit to shut down.
323 * @clear: Status flags to clear.
324 * @id: Id for debug trace event.
325 * @ref: Reference for debug trace event.
326 */ 321 */
327void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id, 322void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
328 void *ref)
329{ 323{
330 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 324 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
331 zfcp_erp_unit_reopen(unit, clear | flags, id, ref); 325 zfcp_erp_port_reopen(port, clear | flags, id);
332} 326}
333 327
334static void zfcp_erp_port_block(struct zfcp_port *port, int clear) 328static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
335{ 329{
336 zfcp_erp_modify_port_status(port, "erpblk1", NULL, 330 zfcp_erp_clear_port_status(port,
337 ZFCP_STATUS_COMMON_UNBLOCKED | clear, 331 ZFCP_STATUS_COMMON_UNBLOCKED | clear);
338 ZFCP_CLEAR);
339} 332}
340 333
341static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, 334static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
342 int clear, char *id, void *ref) 335 char *id)
343{ 336{
344 zfcp_erp_port_block(port, clear); 337 zfcp_erp_port_block(port, clear);
345 zfcp_scsi_schedule_rport_block(port); 338 zfcp_scsi_schedule_rport_block(port);
@@ -348,136 +341,171 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
348 return; 341 return;
349 342
350 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 343 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
351 port->adapter, port, NULL, id, ref); 344 port->adapter, port, NULL, id, 0);
352} 345}
353 346
354/** 347/**
355 * zfcp_erp_port_forced_reopen - Forced close of port and open again 348 * zfcp_erp_port_forced_reopen - Forced close of port and open again
356 * @port: Port to force close and to reopen. 349 * @port: Port to force close and to reopen.
350 * @clear: Status flags to clear.
357 * @id: Id for debug trace event. 351 * @id: Id for debug trace event.
358 * @ref: Reference for debug trace event.
359 */ 352 */
360void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id, 353void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
361 void *ref)
362{ 354{
363 unsigned long flags; 355 unsigned long flags;
364 struct zfcp_adapter *adapter = port->adapter; 356 struct zfcp_adapter *adapter = port->adapter;
365 357
366 write_lock_irqsave(&adapter->erp_lock, flags); 358 write_lock_irqsave(&adapter->erp_lock, flags);
367 _zfcp_erp_port_forced_reopen(port, clear, id, ref); 359 _zfcp_erp_port_forced_reopen(port, clear, id);
368 write_unlock_irqrestore(&adapter->erp_lock, flags); 360 write_unlock_irqrestore(&adapter->erp_lock, flags);
369} 361}
370 362
371static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, 363static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
372 void *ref)
373{ 364{
374 zfcp_erp_port_block(port, clear); 365 zfcp_erp_port_block(port, clear);
375 zfcp_scsi_schedule_rport_block(port); 366 zfcp_scsi_schedule_rport_block(port);
376 367
377 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 368 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
378 /* ensure propagation of failed status to new devices */ 369 /* ensure propagation of failed status to new devices */
379 zfcp_erp_port_failed(port, "erpreo1", NULL); 370 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
380 return -EIO; 371 return -EIO;
381 } 372 }
382 373
383 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, 374 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
384 port->adapter, port, NULL, id, ref); 375 port->adapter, port, NULL, id, 0);
385} 376}
386 377
387/** 378/**
388 * zfcp_erp_port_reopen - trigger remote port recovery 379 * zfcp_erp_port_reopen - trigger remote port recovery
389 * @port: port to recover 380 * @port: port to recover
390 * @clear_mask: flags in port status to be cleared 381 * @clear_mask: flags in port status to be cleared
382 * @id: Id for debug trace event.
391 * 383 *
392 * Returns 0 if recovery has been triggered, < 0 if not. 384 * Returns 0 if recovery has been triggered, < 0 if not.
393 */ 385 */
394int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) 386int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
395{ 387{
396 int retval; 388 int retval;
397 unsigned long flags; 389 unsigned long flags;
398 struct zfcp_adapter *adapter = port->adapter; 390 struct zfcp_adapter *adapter = port->adapter;
399 391
400 write_lock_irqsave(&adapter->erp_lock, flags); 392 write_lock_irqsave(&adapter->erp_lock, flags);
401 retval = _zfcp_erp_port_reopen(port, clear, id, ref); 393 retval = _zfcp_erp_port_reopen(port, clear, id);
402 write_unlock_irqrestore(&adapter->erp_lock, flags); 394 write_unlock_irqrestore(&adapter->erp_lock, flags);
403 395
404 return retval; 396 return retval;
405} 397}
406 398
407static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) 399static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
408{ 400{
409 zfcp_erp_modify_unit_status(unit, "erublk1", NULL, 401 zfcp_erp_clear_lun_status(sdev,
410 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, 402 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
411 ZFCP_CLEAR);
412} 403}
413 404
414static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, 405static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
415 void *ref) 406 u32 act_status)
416{ 407{
417 struct zfcp_adapter *adapter = unit->port->adapter; 408 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
409 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
418 410
419 zfcp_erp_unit_block(unit, clear); 411 zfcp_erp_lun_block(sdev, clear);
420 412
421 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) 413 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
422 return; 414 return;
423 415
424 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, 416 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
425 adapter, unit->port, unit, id, ref); 417 zfcp_sdev->port, sdev, id, act_status);
426} 418}
427 419
428/** 420/**
429 * zfcp_erp_unit_reopen - initiate reopen of a unit 421 * zfcp_erp_lun_reopen - initiate reopen of a LUN
430 * @unit: unit to be reopened 422 * @sdev: SCSI device / LUN to be reopened
431 * @clear_mask: specifies flags in unit status to be cleared 423 * @clear_mask: specifies flags in LUN status to be cleared
424 * @id: Id for debug trace event.
425 *
432 * Return: 0 on success, < 0 on error 426 * Return: 0 on success, < 0 on error
433 */ 427 */
434void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, 428void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
435 void *ref)
436{ 429{
437 unsigned long flags; 430 unsigned long flags;
438 struct zfcp_port *port = unit->port; 431 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
432 struct zfcp_port *port = zfcp_sdev->port;
439 struct zfcp_adapter *adapter = port->adapter; 433 struct zfcp_adapter *adapter = port->adapter;
440 434
441 write_lock_irqsave(&adapter->erp_lock, flags); 435 write_lock_irqsave(&adapter->erp_lock, flags);
442 _zfcp_erp_unit_reopen(unit, clear, id, ref); 436 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
443 write_unlock_irqrestore(&adapter->erp_lock, flags); 437 write_unlock_irqrestore(&adapter->erp_lock, flags);
444} 438}
445 439
446static int status_change_set(unsigned long mask, atomic_t *status) 440/**
441 * zfcp_erp_lun_shutdown - Shutdown LUN
442 * @sdev: SCSI device / LUN to shut down.
443 * @clear: Status flags to clear.
444 * @id: Id for debug trace event.
445 */
446void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
447{ 447{
448 return (atomic_read(status) ^ mask) & mask; 448 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
449 zfcp_erp_lun_reopen(sdev, clear | flags, id);
449} 450}
450 451
451static int status_change_clear(unsigned long mask, atomic_t *status) 452/**
453 * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
454 * @sdev: SCSI device / LUN to shut down.
455 * @id: Id for debug trace event.
456 *
457 * Do not acquire a reference for the LUN when creating the ERP
458 * action. It is safe, because this function waits for the ERP to
459 * complete first. This allows to shutdown the LUN, even when the SCSI
460 * device is in the state SDEV_DEL when scsi_device_get will fail.
461 */
462void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
452{ 463{
453 return atomic_read(status) & mask; 464 unsigned long flags;
465 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
466 struct zfcp_port *port = zfcp_sdev->port;
467 struct zfcp_adapter *adapter = port->adapter;
468 int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
469
470 write_lock_irqsave(&adapter->erp_lock, flags);
471 _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
472 write_unlock_irqrestore(&adapter->erp_lock, flags);
473
474 zfcp_erp_wait(adapter);
475}
476
477static int status_change_set(unsigned long mask, atomic_t *status)
478{
479 return (atomic_read(status) ^ mask) & mask;
454} 480}
455 481
456static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) 482static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
457{ 483{
458 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) 484 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
459 zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf); 485 zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
460 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 486 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
461} 487}
462 488
463static void zfcp_erp_port_unblock(struct zfcp_port *port) 489static void zfcp_erp_port_unblock(struct zfcp_port *port)
464{ 490{
465 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) 491 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
466 zfcp_dbf_rec_port("erpubl1", NULL, port); 492 zfcp_dbf_rec_run("erpubl1", &port->erp_action);
467 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); 493 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
468} 494}
469 495
470static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) 496static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
471{ 497{
472 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) 498 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
473 zfcp_dbf_rec_unit("eruubl1", NULL, unit); 499
474 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); 500 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
501 zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
502 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
475} 503}
476 504
477static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) 505static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
478{ 506{
479 list_move(&erp_action->list, &erp_action->adapter->erp_running_head); 507 list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
480 zfcp_dbf_rec_action("erator1", erp_action); 508 zfcp_dbf_rec_run("erator1", erp_action);
481} 509}
482 510
483static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) 511static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -494,11 +522,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
494 if (act->status & (ZFCP_STATUS_ERP_DISMISSED | 522 if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
495 ZFCP_STATUS_ERP_TIMEDOUT)) { 523 ZFCP_STATUS_ERP_TIMEDOUT)) {
496 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 524 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
497 zfcp_dbf_rec_action("erscf_1", act); 525 zfcp_dbf_rec_run("erscf_1", act);
498 req->erp_action = NULL; 526 req->erp_action = NULL;
499 } 527 }
500 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 528 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
501 zfcp_dbf_rec_action("erscf_2", act); 529 zfcp_dbf_rec_run("erscf_2", act);
502 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) 530 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
503 act->fsf_req_id = 0; 531 act->fsf_req_id = 0;
504 } else 532 } else
@@ -549,41 +577,40 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
549} 577}
550 578
551static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, 579static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
552 int clear, char *id, void *ref) 580 int clear, char *id)
553{ 581{
554 struct zfcp_port *port; 582 struct zfcp_port *port;
555 583
556 read_lock(&adapter->port_list_lock); 584 read_lock(&adapter->port_list_lock);
557 list_for_each_entry(port, &adapter->port_list, list) 585 list_for_each_entry(port, &adapter->port_list, list)
558 _zfcp_erp_port_reopen(port, clear, id, ref); 586 _zfcp_erp_port_reopen(port, clear, id);
559 read_unlock(&adapter->port_list_lock); 587 read_unlock(&adapter->port_list_lock);
560} 588}
561 589
562static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, 590static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
563 char *id, void *ref) 591 char *id)
564{ 592{
565 struct zfcp_unit *unit; 593 struct scsi_device *sdev;
566 594
567 read_lock(&port->unit_list_lock); 595 shost_for_each_device(sdev, port->adapter->scsi_host)
568 list_for_each_entry(unit, &port->unit_list, list) 596 if (sdev_to_zfcp(sdev)->port == port)
569 _zfcp_erp_unit_reopen(unit, clear, id, ref); 597 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
570 read_unlock(&port->unit_list_lock);
571} 598}
572 599
573static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 600static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
574{ 601{
575 switch (act->action) { 602 switch (act->action) {
576 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 603 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
577 _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL); 604 _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
578 break; 605 break;
579 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 606 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
580 _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL); 607 _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
581 break; 608 break;
582 case ZFCP_ERP_ACTION_REOPEN_PORT: 609 case ZFCP_ERP_ACTION_REOPEN_PORT:
583 _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); 610 _zfcp_erp_port_reopen(act->port, 0, "ersff_3");
584 break; 611 break;
585 case ZFCP_ERP_ACTION_REOPEN_UNIT: 612 case ZFCP_ERP_ACTION_REOPEN_LUN:
586 _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); 613 _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
587 break; 614 break;
588 } 615 }
589} 616}
@@ -592,13 +619,13 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
592{ 619{
593 switch (act->action) { 620 switch (act->action) {
594 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 621 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
595 _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL); 622 _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
596 break; 623 break;
597 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 624 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
598 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); 625 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
599 break; 626 break;
600 case ZFCP_ERP_ACTION_REOPEN_PORT: 627 case ZFCP_ERP_ACTION_REOPEN_PORT:
601 _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); 628 _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
602 break; 629 break;
603 } 630 }
604} 631}
@@ -617,17 +644,6 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
617 read_unlock_irqrestore(&adapter->erp_lock, flags); 644 read_unlock_irqrestore(&adapter->erp_lock, flags);
618} 645}
619 646
620static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
621{
622 struct zfcp_qdio *qdio = act->adapter->qdio;
623
624 if (zfcp_qdio_open(qdio))
625 return ZFCP_ERP_FAILED;
626 init_waitqueue_head(&qdio->req_q_wq);
627 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
628 return ZFCP_ERP_SUCCEEDED;
629}
630
631static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) 647static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
632{ 648{
633 struct zfcp_port *port; 649 struct zfcp_port *port;
@@ -635,7 +651,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
635 adapter->peer_d_id); 651 adapter->peer_d_id);
636 if (IS_ERR(port)) /* error or port already attached */ 652 if (IS_ERR(port)) /* error or port already attached */
637 return; 653 return;
638 _zfcp_erp_port_reopen(port, 0, "ereptp1", NULL); 654 _zfcp_erp_port_reopen(port, 0, "ereptp1");
639} 655}
640 656
641static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) 657static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -658,10 +674,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
658 return ZFCP_ERP_FAILED; 674 return ZFCP_ERP_FAILED;
659 } 675 }
660 676
661 zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf);
662 wait_event(adapter->erp_ready_wq, 677 wait_event(adapter->erp_ready_wq,
663 !list_empty(&adapter->erp_ready_head)); 678 !list_empty(&adapter->erp_ready_head));
664 zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf);
665 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) 679 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
666 break; 680 break;
667 681
@@ -700,10 +714,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
700 if (ret) 714 if (ret)
701 return ZFCP_ERP_FAILED; 715 return ZFCP_ERP_FAILED;
702 716
703 zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); 717 zfcp_dbf_rec_run("erasox1", act);
704 wait_event(adapter->erp_ready_wq, 718 wait_event(adapter->erp_ready_wq,
705 !list_empty(&adapter->erp_ready_head)); 719 !list_empty(&adapter->erp_ready_head));
706 zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); 720 zfcp_dbf_rec_run("erasox2", act);
707 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 721 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
708 return ZFCP_ERP_FAILED; 722 return ZFCP_ERP_FAILED;
709 723
@@ -718,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
718 if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) 732 if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
719 return ZFCP_ERP_FAILED; 733 return ZFCP_ERP_FAILED;
720 734
721 if (mempool_resize(act->adapter->pool.status_read_data, 735 if (mempool_resize(act->adapter->pool.sr_data,
722 act->adapter->stat_read_buf_num, GFP_KERNEL)) 736 act->adapter->stat_read_buf_num, GFP_KERNEL))
723 return ZFCP_ERP_FAILED; 737 return ZFCP_ERP_FAILED;
724 738
@@ -742,9 +756,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
742 zfcp_fsf_req_dismiss_all(adapter); 756 zfcp_fsf_req_dismiss_all(adapter);
743 adapter->fsf_req_seq_no = 0; 757 adapter->fsf_req_seq_no = 0;
744 zfcp_fc_wka_ports_force_offline(adapter->gs); 758 zfcp_fc_wka_ports_force_offline(adapter->gs);
745 /* all ports and units are closed */ 759 /* all ports and LUNs are closed */
746 zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, 760 zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
747 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
748 761
749 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 762 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
750 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 763 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
@@ -754,7 +767,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
754{ 767{
755 struct zfcp_adapter *adapter = act->adapter; 768 struct zfcp_adapter *adapter = act->adapter;
756 769
757 if (zfcp_erp_adapter_strategy_open_qdio(act)) { 770 if (zfcp_qdio_open(adapter->qdio)) {
758 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 771 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
759 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 772 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
760 &adapter->status); 773 &adapter->status);
@@ -861,7 +874,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
861 struct zfcp_port *port = act->port; 874 struct zfcp_port *port = act->port;
862 875
863 if (port->wwpn != adapter->peer_wwpn) { 876 if (port->wwpn != adapter->peer_wwpn) {
864 zfcp_erp_port_failed(port, "eroptp1", NULL); 877 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
865 return ZFCP_ERP_FAILED; 878 return ZFCP_ERP_FAILED;
866 } 879 }
867 port->d_id = adapter->peer_d_id; 880 port->d_id = adapter->peer_d_id;
@@ -933,82 +946,87 @@ close_init_done:
933 return zfcp_erp_port_strategy_open_common(erp_action); 946 return zfcp_erp_port_strategy_open_common(erp_action);
934} 947}
935 948
936static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) 949static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
937{ 950{
951 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
952
938 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 953 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
939 ZFCP_STATUS_UNIT_SHARED | 954 ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY,
940 ZFCP_STATUS_UNIT_READONLY, 955 &zfcp_sdev->status);
941 &unit->status);
942} 956}
943 957
944static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) 958static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
945{ 959{
946 int retval = zfcp_fsf_close_unit(erp_action); 960 int retval = zfcp_fsf_close_lun(erp_action);
947 if (retval == -ENOMEM) 961 if (retval == -ENOMEM)
948 return ZFCP_ERP_NOMEM; 962 return ZFCP_ERP_NOMEM;
949 erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; 963 erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
950 if (retval) 964 if (retval)
951 return ZFCP_ERP_FAILED; 965 return ZFCP_ERP_FAILED;
952 return ZFCP_ERP_CONTINUES; 966 return ZFCP_ERP_CONTINUES;
953} 967}
954 968
955static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) 969static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
956{ 970{
957 int retval = zfcp_fsf_open_unit(erp_action); 971 int retval = zfcp_fsf_open_lun(erp_action);
958 if (retval == -ENOMEM) 972 if (retval == -ENOMEM)
959 return ZFCP_ERP_NOMEM; 973 return ZFCP_ERP_NOMEM;
960 erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; 974 erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
961 if (retval) 975 if (retval)
962 return ZFCP_ERP_FAILED; 976 return ZFCP_ERP_FAILED;
963 return ZFCP_ERP_CONTINUES; 977 return ZFCP_ERP_CONTINUES;
964} 978}
965 979
966static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) 980static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
967{ 981{
968 struct zfcp_unit *unit = erp_action->unit; 982 struct scsi_device *sdev = erp_action->sdev;
983 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
969 984
970 switch (erp_action->step) { 985 switch (erp_action->step) {
971 case ZFCP_ERP_STEP_UNINITIALIZED: 986 case ZFCP_ERP_STEP_UNINITIALIZED:
972 zfcp_erp_unit_strategy_clearstati(unit); 987 zfcp_erp_lun_strategy_clearstati(sdev);
973 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 988 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
974 return zfcp_erp_unit_strategy_close(erp_action); 989 return zfcp_erp_lun_strategy_close(erp_action);
975 /* already closed, fall through */ 990 /* already closed, fall through */
976 case ZFCP_ERP_STEP_UNIT_CLOSING: 991 case ZFCP_ERP_STEP_LUN_CLOSING:
977 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 992 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
978 return ZFCP_ERP_FAILED; 993 return ZFCP_ERP_FAILED;
979 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 994 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
980 return ZFCP_ERP_EXIT; 995 return ZFCP_ERP_EXIT;
981 return zfcp_erp_unit_strategy_open(erp_action); 996 return zfcp_erp_lun_strategy_open(erp_action);
982 997
983 case ZFCP_ERP_STEP_UNIT_OPENING: 998 case ZFCP_ERP_STEP_LUN_OPENING:
984 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) 999 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
985 return ZFCP_ERP_SUCCEEDED; 1000 return ZFCP_ERP_SUCCEEDED;
986 } 1001 }
987 return ZFCP_ERP_FAILED; 1002 return ZFCP_ERP_FAILED;
988} 1003}
989 1004
990static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) 1005static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
991{ 1006{
1007 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1008
992 switch (result) { 1009 switch (result) {
993 case ZFCP_ERP_SUCCEEDED : 1010 case ZFCP_ERP_SUCCEEDED :
994 atomic_set(&unit->erp_counter, 0); 1011 atomic_set(&zfcp_sdev->erp_counter, 0);
995 zfcp_erp_unit_unblock(unit); 1012 zfcp_erp_lun_unblock(sdev);
996 break; 1013 break;
997 case ZFCP_ERP_FAILED : 1014 case ZFCP_ERP_FAILED :
998 atomic_inc(&unit->erp_counter); 1015 atomic_inc(&zfcp_sdev->erp_counter);
999 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) { 1016 if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
1000 dev_err(&unit->port->adapter->ccw_device->dev, 1017 dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
1001 "ERP failed for unit 0x%016Lx on " 1018 "ERP failed for LUN 0x%016Lx on "
1002 "port 0x%016Lx\n", 1019 "port 0x%016Lx\n",
1003 (unsigned long long)unit->fcp_lun, 1020 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1004 (unsigned long long)unit->port->wwpn); 1021 (unsigned long long)zfcp_sdev->port->wwpn);
1005 zfcp_erp_unit_failed(unit, "erusck1", NULL); 1022 zfcp_erp_set_lun_status(sdev,
1023 ZFCP_STATUS_COMMON_ERP_FAILED);
1006 } 1024 }
1007 break; 1025 break;
1008 } 1026 }
1009 1027
1010 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 1028 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
1011 zfcp_erp_unit_block(unit, 0); 1029 zfcp_erp_lun_block(sdev, 0);
1012 result = ZFCP_ERP_EXIT; 1030 result = ZFCP_ERP_EXIT;
1013 } 1031 }
1014 return result; 1032 return result;
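zfcp_erp_strategy_check_lun() above, like the port and adapter variants in the following hunks, implements one common pattern: reset the retry counter on success, count failures, and latch ZFCP_STATUS_COMMON_ERP_FAILED once the retry cap ZFCP_MAX_ERPS is exceeded, after which recovery exits instead of retrying. Reduced to its essentials (a sketch, not the literal driver code, which splits the increment and the read):

/* Common shape of the strategy_check_* helpers in this patch. */
static int erp_check_result_sketch(atomic_t *erp_counter,
				   atomic_t *status, int result)
{
	switch (result) {
	case ZFCP_ERP_SUCCEEDED:
		atomic_set(erp_counter, 0);	/* healthy again: reset */
		break;
	case ZFCP_ERP_FAILED:
		if (atomic_inc_return(erp_counter) > ZFCP_MAX_ERPS)
			/* give up: latch the failed bit */
			atomic_set_mask(ZFCP_STATUS_COMMON_ERP_FAILED,
					status);
		break;
	}
	/* once latched, block the object and leave recovery */
	if (atomic_read(status) & ZFCP_STATUS_COMMON_ERP_FAILED)
		result = ZFCP_ERP_EXIT;
	return result;
}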
@@ -1032,7 +1050,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1032 dev_err(&port->adapter->ccw_device->dev, 1050 dev_err(&port->adapter->ccw_device->dev,
1033 "ERP failed for remote port 0x%016Lx\n", 1051 "ERP failed for remote port 0x%016Lx\n",
1034 (unsigned long long)port->wwpn); 1052 (unsigned long long)port->wwpn);
1035 zfcp_erp_port_failed(port, "erpsck1", NULL); 1053 zfcp_erp_set_port_status(port,
1054 ZFCP_STATUS_COMMON_ERP_FAILED);
1036 } 1055 }
1037 break; 1056 break;
1038 } 1057 }
@@ -1059,7 +1078,8 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1059 dev_err(&adapter->ccw_device->dev, 1078 dev_err(&adapter->ccw_device->dev,
1060 "ERP cannot recover an error " 1079 "ERP cannot recover an error "
1061 "on the FCP device\n"); 1080 "on the FCP device\n");
1062 zfcp_erp_adapter_failed(adapter, "erasck1", NULL); 1081 zfcp_erp_set_adapter_status(adapter,
1082 ZFCP_STATUS_COMMON_ERP_FAILED);
1063 } 1083 }
1064 break; 1084 break;
1065 } 1085 }
@@ -1076,12 +1096,12 @@ static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
1076{ 1096{
1077 struct zfcp_adapter *adapter = erp_action->adapter; 1097 struct zfcp_adapter *adapter = erp_action->adapter;
1078 struct zfcp_port *port = erp_action->port; 1098 struct zfcp_port *port = erp_action->port;
1079 struct zfcp_unit *unit = erp_action->unit; 1099 struct scsi_device *sdev = erp_action->sdev;
1080 1100
1081 switch (erp_action->action) { 1101 switch (erp_action->action) {
1082 1102
1083 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1103 case ZFCP_ERP_ACTION_REOPEN_LUN:
1084 result = zfcp_erp_strategy_check_unit(unit, result); 1104 result = zfcp_erp_strategy_check_lun(sdev, result);
1085 break; 1105 break;
1086 1106
1087 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1107 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1116,7 +1136,8 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1116 int action = act->action; 1136 int action = act->action;
1117 struct zfcp_adapter *adapter = act->adapter; 1137 struct zfcp_adapter *adapter = act->adapter;
1118 struct zfcp_port *port = act->port; 1138 struct zfcp_port *port = act->port;
1119 struct zfcp_unit *unit = act->unit; 1139 struct scsi_device *sdev = act->sdev;
1140 struct zfcp_scsi_dev *zfcp_sdev;
1120 u32 erp_status = act->status; 1141 u32 erp_status = act->status;
1121 1142
1122 switch (action) { 1143 switch (action) {
@@ -1124,7 +1145,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1124 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) { 1145 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
1125 _zfcp_erp_adapter_reopen(adapter, 1146 _zfcp_erp_adapter_reopen(adapter,
1126 ZFCP_STATUS_COMMON_ERP_FAILED, 1147 ZFCP_STATUS_COMMON_ERP_FAILED,
1127 "ersscg1", NULL); 1148 "ersscg1");
1128 return ZFCP_ERP_EXIT; 1149 return ZFCP_ERP_EXIT;
1129 } 1150 }
1130 break; 1151 break;
@@ -1134,16 +1155,17 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1134 if (zfcp_erp_strat_change_det(&port->status, erp_status)) { 1155 if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
1135 _zfcp_erp_port_reopen(port, 1156 _zfcp_erp_port_reopen(port,
1136 ZFCP_STATUS_COMMON_ERP_FAILED, 1157 ZFCP_STATUS_COMMON_ERP_FAILED,
1137 "ersscg2", NULL); 1158 "ersscg2");
1138 return ZFCP_ERP_EXIT; 1159 return ZFCP_ERP_EXIT;
1139 } 1160 }
1140 break; 1161 break;
1141 1162
1142 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1163 case ZFCP_ERP_ACTION_REOPEN_LUN:
1143 if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { 1164 zfcp_sdev = sdev_to_zfcp(sdev);
1144 _zfcp_erp_unit_reopen(unit, 1165 if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
1145 ZFCP_STATUS_COMMON_ERP_FAILED, 1166 _zfcp_erp_lun_reopen(sdev,
1146 "ersscg3", NULL); 1167 ZFCP_STATUS_COMMON_ERP_FAILED,
1168 "ersscg3", 0);
1147 return ZFCP_ERP_EXIT; 1169 return ZFCP_ERP_EXIT;
1148 } 1170 }
1149 break; 1171 break;
@@ -1154,6 +1176,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1154static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) 1176static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1155{ 1177{
1156 struct zfcp_adapter *adapter = erp_action->adapter; 1178 struct zfcp_adapter *adapter = erp_action->adapter;
1179 struct zfcp_scsi_dev *zfcp_sdev;
1157 1180
1158 adapter->erp_total_count--; 1181 adapter->erp_total_count--;
1159 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { 1182 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
@@ -1162,12 +1185,13 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1162 } 1185 }
1163 1186
1164 list_del(&erp_action->list); 1187 list_del(&erp_action->list);
1165 zfcp_dbf_rec_action("eractd1", erp_action); 1188 zfcp_dbf_rec_run("eractd1", erp_action);
1166 1189
1167 switch (erp_action->action) { 1190 switch (erp_action->action) {
1168 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1191 case ZFCP_ERP_ACTION_REOPEN_LUN:
1192 zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1169 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1193 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
1170 &erp_action->unit->status); 1194 &zfcp_sdev->status);
1171 break; 1195 break;
1172 1196
1173 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1197 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1187,11 +1211,12 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1187{ 1211{
1188 struct zfcp_adapter *adapter = act->adapter; 1212 struct zfcp_adapter *adapter = act->adapter;
1189 struct zfcp_port *port = act->port; 1213 struct zfcp_port *port = act->port;
1190 struct zfcp_unit *unit = act->unit; 1214 struct scsi_device *sdev = act->sdev;
1191 1215
1192 switch (act->action) { 1216 switch (act->action) {
1193 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1217 case ZFCP_ERP_ACTION_REOPEN_LUN:
1194 put_device(&unit->dev); 1218 if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
1219 scsi_device_put(sdev);
1195 break; 1220 break;
1196 1221
1197 case ZFCP_ERP_ACTION_REOPEN_PORT: 1222 case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1206,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1206 if (result == ZFCP_ERP_SUCCEEDED) { 1231 if (result == ZFCP_ERP_SUCCEEDED) {
1207 register_service_level(&adapter->service_level); 1232 register_service_level(&adapter->service_level);
1208 queue_work(adapter->work_queue, &adapter->scan_work); 1233 queue_work(adapter->work_queue, &adapter->scan_work);
1234 queue_work(adapter->work_queue, &adapter->ns_up_work);
1209 } else 1235 } else
1210 unregister_service_level(&adapter->service_level); 1236 unregister_service_level(&adapter->service_level);
1237
1211 kref_put(&adapter->ref, zfcp_adapter_release); 1238 kref_put(&adapter->ref, zfcp_adapter_release);
1212 break; 1239 break;
1213 } 1240 }
@@ -1222,8 +1249,8 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1222 return zfcp_erp_port_forced_strategy(erp_action); 1249 return zfcp_erp_port_forced_strategy(erp_action);
1223 case ZFCP_ERP_ACTION_REOPEN_PORT: 1250 case ZFCP_ERP_ACTION_REOPEN_PORT:
1224 return zfcp_erp_port_strategy(erp_action); 1251 return zfcp_erp_port_strategy(erp_action);
1225 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1252 case ZFCP_ERP_ACTION_REOPEN_LUN:
1226 return zfcp_erp_unit_strategy(erp_action); 1253 return zfcp_erp_lun_strategy(erp_action);
1227 } 1254 }
1228 return ZFCP_ERP_FAILED; 1255 return ZFCP_ERP_FAILED;
1229} 1256}
@@ -1267,7 +1294,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1267 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM; 1294 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
1268 } 1295 }
1269 if (adapter->erp_total_count == adapter->erp_low_mem_count) 1296 if (adapter->erp_total_count == adapter->erp_low_mem_count)
1270 _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL); 1297 _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
1271 else { 1298 else {
1272 zfcp_erp_strategy_memwait(erp_action); 1299 zfcp_erp_strategy_memwait(erp_action);
1273 retval = ZFCP_ERP_CONTINUES; 1300 retval = ZFCP_ERP_CONTINUES;
@@ -1311,11 +1338,9 @@ static int zfcp_erp_thread(void *data)
1311 unsigned long flags; 1338 unsigned long flags;
1312 1339
1313 for (;;) { 1340 for (;;) {
1314 zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf);
1315 wait_event_interruptible(adapter->erp_ready_wq, 1341 wait_event_interruptible(adapter->erp_ready_wq,
1316 !list_empty(&adapter->erp_ready_head) || 1342 !list_empty(&adapter->erp_ready_head) ||
1317 kthread_should_stop()); 1343 kthread_should_stop());
1318 zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf);
1319 1344
1320 if (kthread_should_stop()) 1345 if (kthread_should_stop())
1321 break; 1346 break;
@@ -1376,42 +1401,6 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1376} 1401}
1377 1402
1378/** 1403/**
1379 * zfcp_erp_adapter_failed - Set adapter status to failed.
1380 * @adapter: Failed adapter.
1381 * @id: Event id for debug trace.
1382 * @ref: Reference for debug trace.
1383 */
1384void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref)
1385{
1386 zfcp_erp_modify_adapter_status(adapter, id, ref,
1387 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1388}
1389
1390/**
1391 * zfcp_erp_port_failed - Set port status to failed.
1392 * @port: Failed port.
1393 * @id: Event id for debug trace.
1394 * @ref: Reference for debug trace.
1395 */
1396void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref)
1397{
1398 zfcp_erp_modify_port_status(port, id, ref,
1399 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1400}
1401
1402/**
1403 * zfcp_erp_unit_failed - Set unit status to failed.
1404 * @unit: Failed unit.
1405 * @id: Event id for debug trace.
1406 * @ref: Reference for debug trace.
1407 */
1408void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref)
1409{
1410 zfcp_erp_modify_unit_status(unit, id, ref,
1411 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1412}
1413
1414/**
1415 * zfcp_erp_wait - wait for completion of error recovery on an adapter 1404 * zfcp_erp_wait - wait for completion of error recovery on an adapter
1416 * @adapter: adapter for which to wait for completion of its error recovery 1405 * @adapter: adapter for which to wait for completion of its error recovery
1417 */ 1406 */
@@ -1423,210 +1412,148 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter)
1423} 1412}
1424 1413
1425/** 1414/**
1426 * zfcp_erp_modify_adapter_status - change adapter status bits 1415 * zfcp_erp_set_adapter_status - set adapter status bits
1427 * @adapter: adapter to change the status 1416 * @adapter: adapter to change the status
1428 * @id: id for the debug trace
1429 * @ref: reference for the debug trace
1430 * @mask: status bits to change 1417 * @mask: status bits to change
1431 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1432 * 1418 *
1433 * Changes in common status bits are propagated to attached ports and units. 1419 * Changes in common status bits are propagated to attached ports and LUNs.
1434 */ 1420 */
1435void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, 1421void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1436 void *ref, u32 mask, int set_or_clear)
1437{ 1422{
1438 struct zfcp_port *port; 1423 struct zfcp_port *port;
1424 struct scsi_device *sdev;
1439 unsigned long flags; 1425 unsigned long flags;
1440 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1426 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1441 1427
1442 if (set_or_clear == ZFCP_SET) { 1428 atomic_set_mask(mask, &adapter->status);
1443 if (status_change_set(mask, &adapter->status))
1444 zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
1445 atomic_set_mask(mask, &adapter->status);
1446 } else {
1447 if (status_change_clear(mask, &adapter->status))
1448 zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
1449 atomic_clear_mask(mask, &adapter->status);
1450 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1451 atomic_set(&adapter->erp_counter, 0);
1452 }
1453 1429
1454 if (common_mask) { 1430 if (!common_mask)
1455 read_lock_irqsave(&adapter->port_list_lock, flags); 1431 return;
1456 list_for_each_entry(port, &adapter->port_list, list) 1432
1457 zfcp_erp_modify_port_status(port, id, ref, common_mask, 1433 read_lock_irqsave(&adapter->port_list_lock, flags);
1458 set_or_clear); 1434 list_for_each_entry(port, &adapter->port_list, list)
1459 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1435 atomic_set_mask(common_mask, &port->status);
1460 } 1436 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1437
1438 shost_for_each_device(sdev, adapter->scsi_host)
1439 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1461} 1440}
1462 1441
1463/** 1442/**
1464 * zfcp_erp_modify_port_status - change port status bits 1443 * zfcp_erp_clear_adapter_status - clear adapter status bits
1465 * @port: port to change the status bits 1444 * @adapter: adapter to change the status
1466 * @id: id for the debug trace
1467 * @ref: reference for the debug trace
1468 * @mask: status bits to change 1445 * @mask: status bits to change
1469 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1470 * 1446 *
1471 * Changes in common status bits are propagated to attached units. 1447 * Changes in common status bits are propagated to attached ports and LUNs.
1472 */ 1448 */
1473void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, 1449void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1474 u32 mask, int set_or_clear)
1475{ 1450{
1476 struct zfcp_unit *unit; 1451 struct zfcp_port *port;
1452 struct scsi_device *sdev;
1477 unsigned long flags; 1453 unsigned long flags;
1478 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1454 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1455 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1479 1456
1480 if (set_or_clear == ZFCP_SET) { 1457 atomic_clear_mask(mask, &adapter->status);
1481 if (status_change_set(mask, &port->status)) 1458
1482 zfcp_dbf_rec_port(id, ref, port); 1459 if (!common_mask)
1483 atomic_set_mask(mask, &port->status); 1460 return;
1484 } else { 1461
1485 if (status_change_clear(mask, &port->status)) 1462 if (clear_counter)
1486 zfcp_dbf_rec_port(id, ref, port); 1463 atomic_set(&adapter->erp_counter, 0);
1487 atomic_clear_mask(mask, &port->status); 1464
1488 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) 1465 read_lock_irqsave(&adapter->port_list_lock, flags);
1466 list_for_each_entry(port, &adapter->port_list, list) {
1467 atomic_clear_mask(common_mask, &port->status);
1468 if (clear_counter)
1489 atomic_set(&port->erp_counter, 0); 1469 atomic_set(&port->erp_counter, 0);
1490 } 1470 }
1471 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1491 1472
1492 if (common_mask) { 1473 shost_for_each_device(sdev, adapter->scsi_host) {
1493 read_lock_irqsave(&port->unit_list_lock, flags); 1474 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1494 list_for_each_entry(unit, &port->unit_list, list) 1475 if (clear_counter)
1495 zfcp_erp_modify_unit_status(unit, id, ref, common_mask, 1476 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1496 set_or_clear);
1497 read_unlock_irqrestore(&port->unit_list_lock, flags);
1498 } 1477 }
1499} 1478}
1500 1479
1501/** 1480/**
1502 * zfcp_erp_modify_unit_status - change unit status bits 1481 * zfcp_erp_set_port_status - set port status bits
1503 * @unit: unit to change the status bits 1482 * @port: port to change the status
1504 * @id: id for the debug trace
1505 * @ref: reference for the debug trace
1506 * @mask: status bits to change 1483 * @mask: status bits to change
1507 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR 1484 *
1508 */ 1485 * Changes in common status bits are propagated to attached LUNs.
1509void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
1510 u32 mask, int set_or_clear)
1511{
1512 if (set_or_clear == ZFCP_SET) {
1513 if (status_change_set(mask, &unit->status))
1514 zfcp_dbf_rec_unit(id, ref, unit);
1515 atomic_set_mask(mask, &unit->status);
1516 } else {
1517 if (status_change_clear(mask, &unit->status))
1518 zfcp_dbf_rec_unit(id, ref, unit);
1519 atomic_clear_mask(mask, &unit->status);
1520 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
1521 atomic_set(&unit->erp_counter, 0);
1522 }
1523 }
1524}
1525
1526/**
1527 * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP
1528 * @port: The "boxed" port.
1529 * @id: The debug trace id.
1530 * @ref: Reference for the debug trace.
1531 */ 1486 */
1532void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) 1487void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
1533{ 1488{
1534 zfcp_erp_modify_port_status(port, id, ref, 1489 struct scsi_device *sdev;
1535 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); 1490 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1536 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1537}
1538 1491
1539/** 1492 atomic_set_mask(mask, &port->status);
1540 * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP
1541 * @unit: The "boxed" unit.
1542 * @id: The debug trace id.
1543 * @ref: Reference for the debug trace.
1544 */
1545void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
1546{
1547 zfcp_erp_modify_unit_status(unit, id, ref,
1548 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
1549 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1550}
1551 1493
1552/** 1494 if (!common_mask)
1553 * zfcp_erp_port_access_denied - Adapter denied access to port. 1495 return;
1554 * @port: port where access has been denied 1496
1555 * @id: id for debug trace 1497 shost_for_each_device(sdev, port->adapter->scsi_host)
1556 * @ref: reference for debug trace 1498 if (sdev_to_zfcp(sdev)->port == port)
1557 * 1499 atomic_set_mask(common_mask,
1558 * Since the adapter has denied access, stop using the port and the 1500 &sdev_to_zfcp(sdev)->status);
1559 * attached units.
1560 */
1561void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
1562{
1563 zfcp_erp_modify_port_status(port, id, ref,
1564 ZFCP_STATUS_COMMON_ERP_FAILED |
1565 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
1566} 1501}
1567 1502
1568/** 1503/**
1569 * zfcp_erp_unit_access_denied - Adapter denied access to unit. 1504 * zfcp_erp_clear_port_status - clear port status bits
1570 * @unit: unit where access has been denied 1505 * @port: port to change the status
1571 * @id: id for debug trace 1506 * @mask: status bits to change
1572 * @ref: reference for debug trace
1573 * 1507 *
1574 * Since the adapter has denied access, stop using the unit. 1508 * Changes in common status bits are propagated to attached LUNs.
1575 */ 1509 */
1576void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref) 1510void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1577{ 1511{
1578 zfcp_erp_modify_unit_status(unit, id, ref, 1512 struct scsi_device *sdev;
1579 ZFCP_STATUS_COMMON_ERP_FAILED | 1513 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1580 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1514 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1581}
1582 1515
1583static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id, 1516 atomic_clear_mask(mask, &port->status);
1584 void *ref) 1517
1585{ 1518 if (!common_mask)
1586 int status = atomic_read(&unit->status);
1587 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1588 ZFCP_STATUS_COMMON_ACCESS_BOXED)))
1589 return; 1519 return;
1590 1520
1591 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1521 if (clear_counter)
1522 atomic_set(&port->erp_counter, 0);
1523
1524 shost_for_each_device(sdev, port->adapter->scsi_host)
1525 if (sdev_to_zfcp(sdev)->port == port) {
1526 atomic_clear_mask(common_mask,
1527 &sdev_to_zfcp(sdev)->status);
1528 if (clear_counter)
1529 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1530 }
1592} 1531}
1593 1532
1594static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, 1533/**
1595 void *ref) 1534 * zfcp_erp_set_lun_status - set lun status bits
1535 * @sdev: SCSI device / lun to set the status bits
1536 * @mask: status bits to change
1537 */
1538void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
1596{ 1539{
1597 struct zfcp_unit *unit; 1540 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1598 unsigned long flags;
1599 int status = atomic_read(&port->status);
1600
1601 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1602 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1603 read_lock_irqsave(&port->unit_list_lock, flags);
1604 list_for_each_entry(unit, &port->unit_list, list)
1605 zfcp_erp_unit_access_changed(unit, id, ref);
1606 read_unlock_irqrestore(&port->unit_list_lock, flags);
1607 return;
1608 }
1609 1541
1610 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1542 atomic_set_mask(mask, &zfcp_sdev->status);
1611} 1543}
1612 1544
1613/** 1545/**
1614 * zfcp_erp_adapter_access_changed - Process change in adapter ACT 1546 * zfcp_erp_clear_lun_status - clear LUN status bits
1615 * @adapter: Adapter where the Access Control Table (ACT) changed 1547 * @sdev: SCSI device / LUN to clear the status bits
1616 * @id: Id for debug trace 1548 * @mask: status bits to change
1617 * @ref: Reference for debug trace
1618 */ 1549 */
1619void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, 1550void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
1620 void *ref)
1621{ 1551{
1622 unsigned long flags; 1552 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1623 struct zfcp_port *port;
1624 1553
1625 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 1554 atomic_clear_mask(mask, &zfcp_sdev->status);
1626 return;
1627 1555
1628 read_lock_irqsave(&adapter->port_list_lock, flags); 1556 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1629 list_for_each_entry(port, &adapter->port_list, list) 1557 atomic_set(&zfcp_sdev->erp_counter, 0);
1630 zfcp_erp_port_access_changed(port, id, ref);
1631 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1632} 1558}
1559
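Taken together, the zfcp_erp.c hunks above replace the single zfcp_erp_modify_{adapter,port,unit}_status() entry points (direction selected by a ZFCP_SET/ZFCP_CLEAR flag, plus dbf id/ref arguments) with dedicated set/clear functions that carry no trace parameters; status-change tracing now happens at the call sites. An illustrative before/after for one call site, matching the zfcp_erp_strategy_check_port hunk earlier in this diff:

/* before this patch: one entry point, flag-selected direction,
 * debug-trace id and reference threaded through */
zfcp_erp_modify_port_status(port, "erpsck1", NULL,
			    ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);

/* after this patch: separate, trace-free set/clear primitives;
 * common bits still propagate down to the attached LUNs */
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
/* ...and later, when the port recovers: */
zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);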
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3b93239c6f69..03627cfd81cd 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -15,12 +15,10 @@
15#include "zfcp_fc.h" 15#include "zfcp_fc.h"
16 16
17/* zfcp_aux.c */ 17/* zfcp_aux.c */
18extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
19extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); 18extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
20extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); 19extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, 20extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
22 u32); 21 u32);
23extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
24extern void zfcp_sg_free_table(struct scatterlist *, int); 22extern void zfcp_sg_free_table(struct scatterlist *, int);
25extern int zfcp_sg_setup_table(struct scatterlist *, int); 23extern int zfcp_sg_setup_table(struct scatterlist *, int);
26extern void zfcp_device_unregister(struct device *, 24extern void zfcp_device_unregister(struct device *,
@@ -36,66 +34,53 @@ extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
36 34
37/* zfcp_cfdc.c */ 35/* zfcp_cfdc.c */
38extern struct miscdevice zfcp_cfdc_misc; 36extern struct miscdevice zfcp_cfdc_misc;
37extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *);
38extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *);
39extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *,
40 union fsf_status_qual *);
41extern int zfcp_cfdc_open_lun_eval(struct scsi_device *,
42 struct fsf_qtcb_bottom_support *);
43extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
44
39 45
40/* zfcp_dbf.c */ 46/* zfcp_dbf.c */
41extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); 47extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
42extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *); 48extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
43extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); 49extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
44extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); 50 struct zfcp_port *, struct scsi_device *, u8, u8);
45extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); 51extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
46extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); 52extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
47extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); 53extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
48extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, 54extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
49 struct zfcp_adapter *, struct zfcp_port *,
50 struct zfcp_unit *);
51extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
52extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
53 struct zfcp_dbf *);
54extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
55 struct fsf_status_read_buffer *);
56extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
57extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); 55extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
58extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32); 56extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
59extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); 57extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
60extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); 58extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
61extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); 59extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
62extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *);
63extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
64 struct scsi_cmnd *, struct zfcp_fsf_req *,
65 unsigned long);
66 60
67/* zfcp_erp.c */ 61/* zfcp_erp.c */
68extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, 62extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
69 void *, u32, int); 63extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
70extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); 64extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
71extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, 65extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
72 void *); 66extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
73extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *); 67extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
74extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32, 68extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
75 int); 69extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
76extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); 70extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
77extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); 71extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
78extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, 72extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
79 void *); 73extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
80extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *); 74extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
81extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32, 75extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
82 int);
83extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *);
84extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *);
85extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *);
86extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 76extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
87extern void zfcp_erp_thread_kill(struct zfcp_adapter *); 77extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
88extern void zfcp_erp_wait(struct zfcp_adapter *); 78extern void zfcp_erp_wait(struct zfcp_adapter *);
89extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); 79extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
90extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *);
91extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *);
92extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *);
93extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
94extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
95 void *);
96extern void zfcp_erp_timeout_handler(unsigned long); 80extern void zfcp_erp_timeout_handler(unsigned long);
97 81
98/* zfcp_fc.c */ 82/* zfcp_fc.c */
83extern struct kmem_cache *zfcp_fc_req_cache;
99extern void zfcp_fc_enqueue_event(struct zfcp_adapter *, 84extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
100 enum fc_host_event_code event_code, u32); 85 enum fc_host_event_code event_code, u32);
101extern void zfcp_fc_post_event(struct work_struct *); 86extern void zfcp_fc_post_event(struct work_struct *);
@@ -111,15 +96,17 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
111extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); 96extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
112extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); 97extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
113extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); 98extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
99extern void zfcp_fc_sym_name_update(struct work_struct *);
114 100
115/* zfcp_fsf.c */ 101/* zfcp_fsf.c */
102extern struct kmem_cache *zfcp_fsf_qtcb_cache;
116extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 103extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
117extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *); 104extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
118extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); 105extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
119extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 106extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
120extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 107extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
121extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 108extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
122extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); 109extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
123extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); 110extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
124extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, 111extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
125 struct fsf_qtcb_bottom_config *); 112 struct fsf_qtcb_bottom_config *);
@@ -135,12 +122,10 @@ extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
135 mempool_t *, unsigned int); 122 mempool_t *, unsigned int);
136extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, 123extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
137 struct zfcp_fsf_ct_els *, unsigned int); 124 struct zfcp_fsf_ct_els *, unsigned int);
138extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 125extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
139 struct scsi_cmnd *);
140extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 126extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
141extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); 127extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
142extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, 128extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
143 struct zfcp_unit *);
144extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); 129extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
145 130
146/* zfcp_qdio.c */ 131/* zfcp_qdio.c */
@@ -153,18 +138,18 @@ extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
153extern int zfcp_qdio_open(struct zfcp_qdio *); 138extern int zfcp_qdio_open(struct zfcp_qdio *);
154extern void zfcp_qdio_close(struct zfcp_qdio *); 139extern void zfcp_qdio_close(struct zfcp_qdio *);
155extern void zfcp_qdio_siosl(struct zfcp_adapter *); 140extern void zfcp_qdio_siosl(struct zfcp_adapter *);
141extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
142 struct qdio_buffer *);
156 143
157/* zfcp_scsi.c */ 144/* zfcp_scsi.c */
158extern struct zfcp_data zfcp_data; 145extern struct scsi_transport_template *zfcp_scsi_transport_template;
159extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); 146extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
160extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 147extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
161extern struct fc_function_template zfcp_transport_functions; 148extern struct fc_function_template zfcp_transport_functions;
162extern void zfcp_scsi_rport_work(struct work_struct *); 149extern void zfcp_scsi_rport_work(struct work_struct *);
163extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); 150extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
164extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); 151extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
165extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); 152extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
166extern void zfcp_scsi_scan(struct zfcp_unit *);
167extern void zfcp_scsi_scan_work(struct work_struct *);
168extern void zfcp_scsi_set_prot(struct zfcp_adapter *); 153extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
169extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); 154extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
170 155
@@ -175,4 +160,13 @@ extern struct attribute_group zfcp_sysfs_port_attrs;
175extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 160extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
176extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 161extern struct device_attribute *zfcp_sysfs_shost_attrs[];
177 162
163/* zfcp_unit.c */
164extern int zfcp_unit_add(struct zfcp_port *, u64);
165extern int zfcp_unit_remove(struct zfcp_port *, u64);
166extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
167extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
168extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
169extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
170extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
171
178#endif /* ZFCP_EXT_H */ 172#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 6f3ed2b9a349..297e6b71ce9c 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -11,11 +11,14 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/utsname.h>
14#include <scsi/fc/fc_els.h> 15#include <scsi/fc/fc_els.h>
15#include <scsi/libfc.h> 16#include <scsi/libfc.h>
16#include "zfcp_ext.h" 17#include "zfcp_ext.h"
17#include "zfcp_fc.h" 18#include "zfcp_fc.h"
18 19
20struct kmem_cache *zfcp_fc_req_cache;
21
19static u32 zfcp_fc_rscn_range_mask[] = { 22static u32 zfcp_fc_rscn_range_mask[] = {
20 [ELS_ADDR_FMT_PORT] = 0xFFFFFF, 23 [ELS_ADDR_FMT_PORT] = 0xFFFFFF,
21 [ELS_ADDR_FMT_AREA] = 0xFFFF00, 24 [ELS_ADDR_FMT_AREA] = 0xFFFF00,
@@ -174,7 +177,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
174 if (!port->d_id) 177 if (!port->d_id)
175 zfcp_erp_port_reopen(port, 178 zfcp_erp_port_reopen(port,
176 ZFCP_STATUS_COMMON_ERP_FAILED, 179 ZFCP_STATUS_COMMON_ERP_FAILED,
177 "fcrscn1", NULL); 180 "fcrscn1");
178 } 181 }
179 read_unlock_irqrestore(&adapter->port_list_lock, flags); 182 read_unlock_irqrestore(&adapter->port_list_lock, flags);
180} 183}
@@ -215,7 +218,7 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
215 read_lock_irqsave(&adapter->port_list_lock, flags); 218 read_lock_irqsave(&adapter->port_list_lock, flags);
216 list_for_each_entry(port, &adapter->port_list, list) 219 list_for_each_entry(port, &adapter->port_list, list)
217 if (port->wwpn == wwpn) { 220 if (port->wwpn == wwpn) {
218 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req); 221 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
219 break; 222 break;
220 } 223 }
221 read_unlock_irqrestore(&adapter->port_list_lock, flags); 224 read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -251,7 +254,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
251 (struct fsf_status_read_buffer *) fsf_req->data; 254 (struct fsf_status_read_buffer *) fsf_req->data;
252 unsigned int els_type = status_buffer->payload.data[0]; 255 unsigned int els_type = status_buffer->payload.data[0];
253 256
254 zfcp_dbf_san_incoming_els(fsf_req); 257 zfcp_dbf_san_in_els("fciels1", fsf_req);
255 if (els_type == ELS_PLOGI) 258 if (els_type == ELS_PLOGI)
256 zfcp_fc_incoming_plogi(fsf_req); 259 zfcp_fc_incoming_plogi(fsf_req);
257 else if (els_type == ELS_LOGO) 260 else if (els_type == ELS_LOGO)
@@ -260,24 +263,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
260 zfcp_fc_incoming_rscn(fsf_req); 263 zfcp_fc_incoming_rscn(fsf_req);
261} 264}
262 265
263static void zfcp_fc_ns_gid_pn_eval(void *data) 266static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
264{ 267{
265 struct zfcp_fc_gid_pn *gid_pn = data; 268 struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
266 struct zfcp_fsf_ct_els *ct = &gid_pn->ct; 269 struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
267 struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
268 struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
269 struct zfcp_port *port = gid_pn->port;
270 270
271 if (ct->status) 271 if (ct_els->status)
272 return; 272 return;
273 if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC) 273 if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
274 return; 274 return;
275 275
276 /* paranoia */
277 if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
278 return;
279 /* looks like a valid d_id */ 276 /* looks like a valid d_id */
280 port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid); 277 ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
281} 278}
282 279
283static void zfcp_fc_complete(void *data) 280static void zfcp_fc_complete(void *data)
@@ -285,69 +282,73 @@ static void zfcp_fc_complete(void *data)
285 complete(data); 282 complete(data);
286} 283}
287 284
285static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
286{
287 ct_hdr->ct_rev = FC_CT_REV;
288 ct_hdr->ct_fs_type = FC_FST_DIR;
289 ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
290 ct_hdr->ct_cmd = cmd;
291 ct_hdr->ct_mr_size = mr_size / 4;
292}
293
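The new zfcp_fc_ct_ns_init() helper added here factors out the CT IU header boilerplate that the GID_PN, GPN_FT, GSPN and RSPN builders below previously open-coded. Note the unit change hidden inside: callers pass the maximum residual size in bytes and the helper stores it in the 4-byte words the CT header expects; the explicit ct_options = 0 disappears because every containing request is zero-initialized. Before/after at the GID_PN call site, taken from the hunks below:

/* before: repeated at every nameserver request builder */
gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;

/* after: one call, size given in bytes */
zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr, FC_NS_GID_PN,
		   ZFCP_FC_CT_SIZE_PAGE);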
288static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, 294static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
289 struct zfcp_fc_gid_pn *gid_pn) 295 struct zfcp_fc_req *fc_req)
290{ 296{
291 struct zfcp_adapter *adapter = port->adapter; 297 struct zfcp_adapter *adapter = port->adapter;
292 DECLARE_COMPLETION_ONSTACK(completion); 298 DECLARE_COMPLETION_ONSTACK(completion);
299 struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
300 struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
293 int ret; 301 int ret;
294 302
295 /* setup parameters for send generic command */ 303 /* setup parameters for send generic command */
296 gid_pn->port = port; 304 fc_req->ct_els.port = port;
297 gid_pn->ct.handler = zfcp_fc_complete; 305 fc_req->ct_els.handler = zfcp_fc_complete;
298 gid_pn->ct.handler_data = &completion; 306 fc_req->ct_els.handler_data = &completion;
299 gid_pn->ct.req = &gid_pn->sg_req; 307 fc_req->ct_els.req = &fc_req->sg_req;
300 gid_pn->ct.resp = &gid_pn->sg_resp; 308 fc_req->ct_els.resp = &fc_req->sg_rsp;
301 sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req, 309 sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
302 sizeof(struct zfcp_fc_gid_pn_req)); 310 sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
303 sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp, 311
304 sizeof(struct zfcp_fc_gid_pn_resp)); 312 zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
305 313 FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
306 /* setup nameserver request */ 314 gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
307 gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV; 315
308 gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR; 316 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
309 gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
310 gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
311 gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
312 gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
313 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
314
315 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
316 adapter->pool.gid_pn_req, 317 adapter->pool.gid_pn_req,
317 ZFCP_FC_CTELS_TMO); 318 ZFCP_FC_CTELS_TMO);
318 if (!ret) { 319 if (!ret) {
319 wait_for_completion(&completion); 320 wait_for_completion(&completion);
320 zfcp_fc_ns_gid_pn_eval(gid_pn); 321 zfcp_fc_ns_gid_pn_eval(fc_req);
321 } 322 }
322 return ret; 323 return ret;
323} 324}
324 325
325/** 326/**
326 * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request 327 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
327 * @port: port where GID_PN request is needed 328 * @port: port where GID_PN request is needed
328 * return: -ENOMEM on error, 0 otherwise 329 * return: -ENOMEM on error, 0 otherwise
329 */ 330 */
330static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) 331static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
331{ 332{
332 int ret; 333 int ret;
333 struct zfcp_fc_gid_pn *gid_pn; 334 struct zfcp_fc_req *fc_req;
334 struct zfcp_adapter *adapter = port->adapter; 335 struct zfcp_adapter *adapter = port->adapter;
335 336
336 gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC); 337 fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
337 if (!gid_pn) 338 if (!fc_req)
338 return -ENOMEM; 339 return -ENOMEM;
339 340
340 memset(gid_pn, 0, sizeof(*gid_pn)); 341 memset(fc_req, 0, sizeof(*fc_req));
341 342
342 ret = zfcp_fc_wka_port_get(&adapter->gs->ds); 343 ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
343 if (ret) 344 if (ret)
344 goto out; 345 goto out;
345 346
346 ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); 347 ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
347 348
348 zfcp_fc_wka_port_put(&adapter->gs->ds); 349 zfcp_fc_wka_port_put(&adapter->gs->ds);
349out: 350out:
350 mempool_free(gid_pn, adapter->pool.gid_pn); 351 mempool_free(fc_req, adapter->pool.gid_pn);
351 return ret; 352 return ret;
352} 353}
353 354
@@ -360,16 +361,16 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
360 ret = zfcp_fc_ns_gid_pn(port); 361 ret = zfcp_fc_ns_gid_pn(port);
361 if (ret) { 362 if (ret) {
362 /* could not issue gid_pn for some reason */ 363 /* could not issue gid_pn for some reason */
363 zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL); 364 zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
364 goto out; 365 goto out;
365 } 366 }
366 367
367 if (!port->d_id) { 368 if (!port->d_id) {
368 zfcp_erp_port_failed(port, "fcgpn_2", NULL); 369 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
369 goto out; 370 goto out;
370 } 371 }
371 372
372 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); 373 zfcp_erp_port_reopen(port, 0, "fcgpn_3");
373out: 374out:
374 put_device(&port->dev); 375 put_device(&port->dev);
375} 376}
@@ -419,14 +420,14 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
419 420
420static void zfcp_fc_adisc_handler(void *data) 421static void zfcp_fc_adisc_handler(void *data)
421{ 422{
422 struct zfcp_fc_els_adisc *adisc = data; 423 struct zfcp_fc_req *fc_req = data;
423 struct zfcp_port *port = adisc->els.port; 424 struct zfcp_port *port = fc_req->ct_els.port;
424 struct fc_els_adisc *adisc_resp = &adisc->adisc_resp; 425 struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
425 426
426 if (adisc->els.status) { 427 if (fc_req->ct_els.status) {
427 /* request rejected or timed out */ 428 /* request rejected or timed out */
428 zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 429 zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
429 "fcadh_1", NULL); 430 "fcadh_1");
430 goto out; 431 goto out;
431 } 432 }
432 433
@@ -436,7 +437,7 @@ static void zfcp_fc_adisc_handler(void *data)
436 if ((port->wwpn != adisc_resp->adisc_wwpn) || 437 if ((port->wwpn != adisc_resp->adisc_wwpn) ||
437 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { 438 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
438 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 439 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
439 "fcadh_2", NULL); 440 "fcadh_2");
440 goto out; 441 goto out;
441 } 442 }
442 443
@@ -445,42 +446,42 @@ static void zfcp_fc_adisc_handler(void *data)
445 out: 446 out:
446 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 447 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
447 put_device(&port->dev); 448 put_device(&port->dev);
448 kmem_cache_free(zfcp_data.adisc_cache, adisc); 449 kmem_cache_free(zfcp_fc_req_cache, fc_req);
449} 450}
450 451
451static int zfcp_fc_adisc(struct zfcp_port *port) 452static int zfcp_fc_adisc(struct zfcp_port *port)
452{ 453{
453 struct zfcp_fc_els_adisc *adisc; 454 struct zfcp_fc_req *fc_req;
454 struct zfcp_adapter *adapter = port->adapter; 455 struct zfcp_adapter *adapter = port->adapter;
456 struct Scsi_Host *shost = adapter->scsi_host;
455 int ret; 457 int ret;
456 458
457 adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC); 459 fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
458 if (!adisc) 460 if (!fc_req)
459 return -ENOMEM; 461 return -ENOMEM;
460 462
461 adisc->els.port = port; 463 fc_req->ct_els.port = port;
462 adisc->els.req = &adisc->req; 464 fc_req->ct_els.req = &fc_req->sg_req;
463 adisc->els.resp = &adisc->resp; 465 fc_req->ct_els.resp = &fc_req->sg_rsp;
464 sg_init_one(adisc->els.req, &adisc->adisc_req, 466 sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
465 sizeof(struct fc_els_adisc)); 467 sizeof(struct fc_els_adisc));
466 sg_init_one(adisc->els.resp, &adisc->adisc_resp, 468 sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
467 sizeof(struct fc_els_adisc)); 469 sizeof(struct fc_els_adisc));
468 470
469 adisc->els.handler = zfcp_fc_adisc_handler; 471 fc_req->ct_els.handler = zfcp_fc_adisc_handler;
470 adisc->els.handler_data = adisc; 472 fc_req->ct_els.handler_data = fc_req;
471 473
472 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports 474 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
473 without FC-AL-2 capability, so we don't set it */ 475 without FC-AL-2 capability, so we don't set it */
474 adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host); 476 fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
475 adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host); 477 fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
476 adisc->adisc_req.adisc_cmd = ELS_ADISC; 478 fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
477 hton24(adisc->adisc_req.adisc_port_id, 479 hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
478 fc_host_port_id(adapter->scsi_host));
479 480
480 ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els, 481 ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
481 ZFCP_FC_CTELS_TMO); 482 ZFCP_FC_CTELS_TMO);
482 if (ret) 483 if (ret)
483 kmem_cache_free(zfcp_data.adisc_cache, adisc); 484 kmem_cache_free(zfcp_fc_req_cache, fc_req);
484 485
485 return ret; 486 return ret;
486} 487}
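The GID_PN and ADISC hunks above, like the GPN_FT, GSPN and RSPN hunks below, all migrate from per-request structures (zfcp_fc_gid_pn, zfcp_fc_els_adisc, zfcp_fc_gpn_ft) to one zfcp_fc_req container allocated from the new zfcp_fc_req_cache. Its definition lives in zfcp_fc.h and is not part of this excerpt; from the accessors used in this diff it plausibly has the following shape (field types inferred, treat as a sketch rather than the literal header):

/* Inferred from fc_req->ct_els, fc_req->sg_req/sg_rsp and the
 * fc_req->u.<cmd>.req/rsp accesses throughout this diff. */
struct zfcp_fc_req {
	struct zfcp_fsf_ct_els ct_els;	/* shared CT/ELS state */
	struct scatterlist sg_req;	/* maps the request payload */
	struct scatterlist sg_rsp;	/* maps the response payload */
	union {
		struct {
			struct zfcp_fc_gid_pn_req req;
			struct zfcp_fc_gid_pn_rsp rsp;
		} gid_pn;
		struct {
			struct fc_els_adisc req;
			struct fc_els_adisc rsp;
		} adisc;
		struct {
			struct zfcp_fc_gpn_ft_req req;
		} gpn_ft;
		struct {
			struct zfcp_fc_gspn_req req;
			struct zfcp_fc_gspn_rsp rsp;
		} gspn;
		struct {
			struct zfcp_fc_rspn_req req;
			struct fc_ct_hdr rsp;
		} rspn;
	} u;
};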
@@ -507,7 +508,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
507 508
508 /* send of ADISC was not possible */ 509 /* send of ADISC was not possible */
509 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 510 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
510 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); 511 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
511 512
512out: 513out:
513 put_device(&port->dev); 514 put_device(&port->dev);
@@ -528,68 +529,42 @@ void zfcp_fc_test_link(struct zfcp_port *port)
528 put_device(&port->dev); 529 put_device(&port->dev);
529} 530}
530 531
531static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num) 532static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
532{ 533{
533 struct scatterlist *sg = &gpn_ft->sg_req; 534 struct zfcp_fc_req *fc_req;
534
535 kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
536 zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
537
538 kfree(gpn_ft);
539}
540 535
541static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num) 536 fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
542{ 537 if (!fc_req)
543 struct zfcp_fc_gpn_ft *gpn_ft;
544 struct zfcp_fc_gpn_ft_req *req;
545
546 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
547 if (!gpn_ft)
548 return NULL; 538 return NULL;
549 539
550 req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL); 540 if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
551 if (!req) { 541 kmem_cache_free(zfcp_fc_req_cache, fc_req);
552 kfree(gpn_ft); 542 return NULL;
553 gpn_ft = NULL;
554 goto out;
555 } 543 }
556 sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
557 544
558 if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) { 545 sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
559 zfcp_free_sg_env(gpn_ft, buf_num); 546 sizeof(struct zfcp_fc_gpn_ft_req));
560 gpn_ft = NULL;
561 }
562out:
563 return gpn_ft;
564}
565 547
548 return fc_req;
549}
566 550
567static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, 551static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
568 struct zfcp_adapter *adapter, int max_bytes) 552 struct zfcp_adapter *adapter, int max_bytes)
569{ 553{
570 struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; 554 struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
571 struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 555 struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
572 DECLARE_COMPLETION_ONSTACK(completion); 556 DECLARE_COMPLETION_ONSTACK(completion);
573 int ret; 557 int ret;
574 558
575 /* prepare CT IU for GPN_FT */ 559 zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
576 req->ct_hdr.ct_rev = FC_CT_REV;
577 req->ct_hdr.ct_fs_type = FC_FST_DIR;
578 req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
579 req->ct_hdr.ct_options = 0;
580 req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
581 req->ct_hdr.ct_mr_size = max_bytes / 4;
582 req->gpn_ft.fn_domain_id_scope = 0;
583 req->gpn_ft.fn_area_id_scope = 0;
584 req->gpn_ft.fn_fc4_type = FC_TYPE_FCP; 560 req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
585 561
586 /* prepare zfcp_send_ct */ 562 ct_els->handler = zfcp_fc_complete;
587 ct->handler = zfcp_fc_complete; 563 ct_els->handler_data = &completion;
588 ct->handler_data = &completion; 564 ct_els->req = &fc_req->sg_req;
589 ct->req = &gpn_ft->sg_req; 565 ct_els->resp = &fc_req->sg_rsp;
590 ct->resp = gpn_ft->sg_resp;
591 566
592 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL, 567 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
593 ZFCP_FC_CTELS_TMO); 568 ZFCP_FC_CTELS_TMO);
594 if (!ret) 569 if (!ret)
595 wait_for_completion(&completion); 570 wait_for_completion(&completion);
@@ -610,11 +585,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
610 list_move_tail(&port->list, lh); 585 list_move_tail(&port->list, lh);
611} 586}
612 587
613static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, 588static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
614 struct zfcp_adapter *adapter, int max_entries) 589 struct zfcp_adapter *adapter, int max_entries)
615{ 590{
616 struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; 591 struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
617 struct scatterlist *sg = gpn_ft->sg_resp; 592 struct scatterlist *sg = &fc_req->sg_rsp;
618 struct fc_ct_hdr *hdr = sg_virt(sg); 593 struct fc_ct_hdr *hdr = sg_virt(sg);
619 struct fc_gpn_ft_resp *acc = sg_virt(sg); 594 struct fc_gpn_ft_resp *acc = sg_virt(sg);
620 struct zfcp_port *port, *tmp; 595 struct zfcp_port *port, *tmp;
@@ -623,7 +598,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
623 u32 d_id; 598 u32 d_id;
624 int ret = 0, x, last = 0; 599 int ret = 0, x, last = 0;
625 600
626 if (ct->status) 601 if (ct_els->status)
627 return -EIO; 602 return -EIO;
628 603
629 if (hdr->ct_cmd != FC_FS_ACC) { 604 if (hdr->ct_cmd != FC_FS_ACC) {
@@ -659,7 +634,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
659 port = zfcp_port_enqueue(adapter, acc->fp_wwpn, 634 port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
660 ZFCP_STATUS_COMMON_NOESC, d_id); 635 ZFCP_STATUS_COMMON_NOESC, d_id);
661 if (!IS_ERR(port)) 636 if (!IS_ERR(port))
662 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); 637 zfcp_erp_port_reopen(port, 0, "fcegpf1");
663 else if (PTR_ERR(port) != -EEXIST) 638 else if (PTR_ERR(port) != -EEXIST)
664 ret = PTR_ERR(port); 639 ret = PTR_ERR(port);
665 } 640 }
@@ -671,7 +646,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
671 write_unlock_irqrestore(&adapter->port_list_lock, flags); 646 write_unlock_irqrestore(&adapter->port_list_lock, flags);
672 647
673 list_for_each_entry_safe(port, tmp, &remove_lh, list) { 648 list_for_each_entry_safe(port, tmp, &remove_lh, list) {
674 zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL); 649 zfcp_erp_port_shutdown(port, 0, "fcegpf2");
675 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); 650 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
676 } 651 }
677 652
@@ -687,7 +662,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
687 struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, 662 struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
688 scan_work); 663 scan_work);
689 int ret, i; 664 int ret, i;
690 struct zfcp_fc_gpn_ft *gpn_ft; 665 struct zfcp_fc_req *fc_req;
691 int chain, max_entries, buf_num, max_bytes; 666 int chain, max_entries, buf_num, max_bytes;
692 667
693 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; 668 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
@@ -702,25 +677,145 @@ void zfcp_fc_scan_ports(struct work_struct *work)
702 if (zfcp_fc_wka_port_get(&adapter->gs->ds)) 677 if (zfcp_fc_wka_port_get(&adapter->gs->ds))
703 return; 678 return;
704 679
705 gpn_ft = zfcp_alloc_sg_env(buf_num); 680 fc_req = zfcp_alloc_sg_env(buf_num);
706 if (!gpn_ft) 681 if (!fc_req)
707 goto out; 682 goto out;
708 683
709 for (i = 0; i < 3; i++) { 684 for (i = 0; i < 3; i++) {
710 ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); 685 ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
711 if (!ret) { 686 if (!ret) {
712 ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries); 687 ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
713 if (ret == -EAGAIN) 688 if (ret == -EAGAIN)
714 ssleep(1); 689 ssleep(1);
715 else 690 else
716 break; 691 break;
717 } 692 }
718 } 693 }
719 zfcp_free_sg_env(gpn_ft, buf_num); 694 zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
695 kmem_cache_free(zfcp_fc_req_cache, fc_req);
720out: 696out:
721 zfcp_fc_wka_port_put(&adapter->gs->ds); 697 zfcp_fc_wka_port_put(&adapter->gs->ds);
722} 698}
723 699
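The scan loop above retries the whole GPN_FT round trip up to three times, sleeping one second whenever evaluation returns -EAGAIN (the nameserver reported more ports than the response buffers could hold). A minimal sketch of that bounded-retry idiom; the example_* names are illustrative stand-ins for the zfcp helpers:

#include <linux/delay.h>
#include <linux/errno.h>

static int example_query_with_retry(void *ctx,
                                    int (*send)(void *ctx),
                                    int (*eval)(void *ctx))
{
        int ret = -EIO;
        int i;

        for (i = 0; i < 3; i++) {
                ret = send(ctx);
                if (ret)
                        continue;       /* sending failed, try once more */
                ret = eval(ctx);
                if (ret != -EAGAIN)
                        break;          /* success or a hard error */
                ssleep(1);              /* incomplete response, settle */
        }
        return ret;
}
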
700static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
701 struct zfcp_fc_req *fc_req)
702{
703 DECLARE_COMPLETION_ONSTACK(completion);
704 char devno[] = "DEVNO:";
705 struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
706 struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
707 struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
708 int ret;
709
710 zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
711 FC_SYMBOLIC_NAME_SIZE);
712 hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
713
714 sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
715 sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
716
717 ct_els->handler = zfcp_fc_complete;
718 ct_els->handler_data = &completion;
719 ct_els->req = &fc_req->sg_req;
720 ct_els->resp = &fc_req->sg_rsp;
721
722 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
723 ZFCP_FC_CTELS_TMO);
724 if (ret)
725 return ret;
726
727 wait_for_completion(&completion);
728 if (ct_els->status)
729 return ct_els->status;
730
731 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
732 !(strstr(gspn_rsp->gspn.fp_name, devno)))
733 snprintf(fc_host_symbolic_name(adapter->scsi_host),
734 FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
735 gspn_rsp->gspn.fp_name, devno,
736 dev_name(&adapter->ccw_device->dev),
737 init_utsname()->nodename);
738 else
739 strlcpy(fc_host_symbolic_name(adapter->scsi_host),
740 gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
741
742 return 0;
743}
744
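zfcp_fc_gspn above (and zfcp_fc_rspn below) turn the asynchronous CT machinery synchronous with an on-stack completion: the response handler runs in a different context and merely signals the sleeping submitter. A hedged sketch of the pattern; struct example_request and the fire callback stand in for struct zfcp_fsf_ct_els and zfcp_fsf_send_ct:

#include <linux/completion.h>
#include <linux/errno.h>

struct example_request {
        void (*handler)(void *data);    /* invoked from response context */
        void *handler_data;
        int status;                     /* filled in before handler runs */
};

static void example_complete(void *data)
{
        complete(data);                 /* wake the waiting submitter */
}

static int example_sync_send(struct example_request *req,
                             int (*fire)(struct example_request *))
{
        DECLARE_COMPLETION_ONSTACK(done);
        int ret;

        req->handler = example_complete;
        req->handler_data = &done;

        ret = fire(req);                /* hand off to async machinery */
        if (ret)
                return ret;             /* nothing in flight, no wait */

        wait_for_completion(&done);     /* handler ran, status is valid */
        return req->status;
}
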
745static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
746 struct zfcp_fc_req *fc_req)
747{
748 DECLARE_COMPLETION_ONSTACK(completion);
749 struct Scsi_Host *shost = adapter->scsi_host;
750 struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
751 struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
752 struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
753 int ret, len;
754
755 zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
756 FC_SYMBOLIC_NAME_SIZE);
757 hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
758 len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
759 FC_SYMBOLIC_NAME_SIZE);
760 rspn_req->rspn.fr_name_len = len;
761
762 sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
763 sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
764
765 ct_els->handler = zfcp_fc_complete;
766 ct_els->handler_data = &completion;
767 ct_els->req = &fc_req->sg_req;
768 ct_els->resp = &fc_req->sg_rsp;
769
770 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
771 ZFCP_FC_CTELS_TMO);
772 if (!ret)
773 wait_for_completion(&completion);
774}
775
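One caveat on the strlcpy() call above that feeds fr_name_len: strlcpy() returns strlen(src), not the number of bytes actually stored, so a symbolic name longer than FC_SYMBOLIC_NAME_SIZE would leave fr_name_len larger than the copied payload. A sketch of capping the result (an assumption about stricter handling, not something this hunk does):

#include <linux/string.h>

static size_t example_copy_name(char *dst, const char *src, size_t dstsz)
{
        size_t len = strlcpy(dst, src, dstsz);  /* returns strlen(src) */

        if (len >= dstsz)               /* source was truncated */
                len = dstsz - 1;        /* strlcpy always NUL-terminates */
        return len;                     /* bytes stored, minus the NUL */
}
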
776/**
777 * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
778 * @work: ns_up_work of the adapter where to update the symbolic port name
779 *
780 * Retrieve the current symbolic port name that may have been set by
781 * the hardware using the GSPN request and update the fc_host
782 * symbolic_name sysfs attribute. When running in NPIV mode (and hence
783 * the port name is unique for this system), update the symbolic port
784 * name to add Linux specific information and update the FC nameserver
785 * using the RSPN request.
786 */
787void zfcp_fc_sym_name_update(struct work_struct *work)
788{
789 struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
790 ns_up_work);
791 int ret;
792 struct zfcp_fc_req *fc_req;
793
794 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
795 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
796 return;
797
798 fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
799 if (!fc_req)
800 return;
801
802 ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
803 if (ret)
804 goto out_free;
805
806 ret = zfcp_fc_gspn(adapter, fc_req);
807 if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
808 goto out_ds_put;
809
810 memset(fc_req, 0, sizeof(*fc_req));
811 zfcp_fc_rspn(adapter, fc_req);
812
813out_ds_put:
814 zfcp_fc_wka_port_put(&adapter->gs->ds);
815out_free:
816 kmem_cache_free(zfcp_fc_req_cache, fc_req);
817}
818
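zfcp_fc_sym_name_update above reuses one slab object for both nameserver operations: zeroed on allocation for GSPN, then wiped with memset() before the RSPN phase, saving a second allocation. A hedged sketch of that allocate-once, scrub-and-reuse pattern; example_cache and struct example_req are illustrative stand-ins for zfcp_fc_req_cache and struct zfcp_fc_req:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_req {
        char payload[256];
};

static struct kmem_cache *example_cache;        /* created at module init */

static int example_two_phase(int (*phase1)(struct example_req *),
                             void (*phase2)(struct example_req *))
{
        struct example_req *req;
        int ret;

        req = kmem_cache_zalloc(example_cache, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = phase1(req);              /* e.g. the GSPN round trip */
        if (ret)
                goto out_free;

        memset(req, 0, sizeof(*req));   /* scrub before reuse */
        phase2(req);                    /* e.g. the RSPN registration */

out_free:
        kmem_cache_free(example_cache, req);
        return ret;
}
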
724static void zfcp_fc_ct_els_job_handler(void *data) 819static void zfcp_fc_ct_els_job_handler(void *data)
725{ 820{
726 struct fc_bsg_job *job = data; 821 struct fc_bsg_job *job = data;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 938d50360166..4561f3bf7300 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -64,33 +64,16 @@ struct zfcp_fc_gid_pn_req {
64} __packed; 64} __packed;
65 65
66/** 66/**
67 * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response 67 * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
68 * @ct_hdr: FC GS common transport header 68 * @ct_hdr: FC GS common transport header
69 * @gid_pn: GID_PN response 69 * @gid_pn: GID_PN response
70 */ 70 */
71struct zfcp_fc_gid_pn_resp { 71struct zfcp_fc_gid_pn_rsp {
72 struct fc_ct_hdr ct_hdr; 72 struct fc_ct_hdr ct_hdr;
73 struct fc_gid_pn_resp gid_pn; 73 struct fc_gid_pn_resp gid_pn;
74} __packed; 74} __packed;
75 75
76/** 76/**
77 * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
78 * @ct: data passed to zfcp_fsf for issuing fsf request
79 * @sg_req: scatterlist entry for request data
80 * @sg_resp: scatterlist entry for response data
81 * @gid_pn_req: GID_PN request data
82 * @gid_pn_resp: GID_PN response data
83 */
84struct zfcp_fc_gid_pn {
85 struct zfcp_fsf_ct_els ct;
86 struct scatterlist sg_req;
87 struct scatterlist sg_resp;
88 struct zfcp_fc_gid_pn_req gid_pn_req;
89 struct zfcp_fc_gid_pn_resp gid_pn_resp;
90 struct zfcp_port *port;
91};
92
93/**
94 * struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request 77 * struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request
95 * @ct_hdr: FC GS common transport header 78 * @ct_hdr: FC GS common transport header
96 * @gpn_ft: GPN_FT request 79 * @gpn_ft: GPN_FT request
@@ -101,41 +84,72 @@ struct zfcp_fc_gpn_ft_req {
101} __packed; 84} __packed;
102 85
103/** 86/**
104 * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response 87 * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
105 * @ct_hdr: FC GS common transport header 88 * @ct_hdr: FC GS common transport header
106 * @gpn_ft: Array of gpn_ft response data to fill one memory page 89 * @gspn: GSPN_ID request
107 */ 90 */
108struct zfcp_fc_gpn_ft_resp { 91struct zfcp_fc_gspn_req {
109 struct fc_ct_hdr ct_hdr; 92 struct fc_ct_hdr ct_hdr;
110 struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE]; 93 struct fc_gid_pn_resp gspn;
111} __packed; 94} __packed;
112 95
113/** 96/**
114 * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request 97 * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
115 * @ct: data passed to zfcp_fsf for issuing fsf request 98 * @ct_hdr: FC GS common transport header
116 * @sg_req: scatter list entry for gpn_ft request 99 * @gspn: GSPN_ID response
117 * @sg_resp: scatter list entries for gpn_ft responses (per memory page) 100 * @name: The name string of the GSPN_ID response
118 */ 101 */
119struct zfcp_fc_gpn_ft { 102struct zfcp_fc_gspn_rsp {
120 struct zfcp_fsf_ct_els ct; 103 struct fc_ct_hdr ct_hdr;
121 struct scatterlist sg_req; 104 struct fc_gspn_resp gspn;
122 struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS]; 105 char name[FC_SYMBOLIC_NAME_SIZE];
123}; 106} __packed;
124 107
125/** 108/**
126 * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC 109 * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
127 * @els: data required for issuing els fsf command 110 * @ct_hdr: FC GS common transport header
128 * @req: scatterlist entry for ELS ADISC request 111 * @rspn: RSPN_ID request
129 * @resp: scatterlist entry for ELS ADISC response 112 * @name: The name string of the RSPN_ID request
130 * @adisc_req: ELS ADISC request data
131 * @adisc_resp: ELS ADISC response data
132 */ 113 */
133struct zfcp_fc_els_adisc { 114struct zfcp_fc_rspn_req {
134 struct zfcp_fsf_ct_els els; 115 struct fc_ct_hdr ct_hdr;
135 struct scatterlist req; 116 struct fc_ns_rspn rspn;
136 struct scatterlist resp; 117 char name[FC_SYMBOLIC_NAME_SIZE];
137 struct fc_els_adisc adisc_req; 118} __packed;
138 struct fc_els_adisc adisc_resp; 119
120/**
121 * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
122 * @ct_els: data required for issuing fsf command
123 * @sg_req: scatterlist entry for request data
124 * @sg_rsp: scatterlist entry for response data
125 * @u: request specific data
126 */
127struct zfcp_fc_req {
128 struct zfcp_fsf_ct_els ct_els;
129 struct scatterlist sg_req;
130 struct scatterlist sg_rsp;
131 union {
132 struct {
133 struct fc_els_adisc req;
134 struct fc_els_adisc rsp;
135 } adisc;
136 struct {
137 struct zfcp_fc_gid_pn_req req;
138 struct zfcp_fc_gid_pn_rsp rsp;
139 } gid_pn;
140 struct {
141 struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
142 struct zfcp_fc_gpn_ft_req req;
143 } gpn_ft;
144 struct {
145 struct zfcp_fc_gspn_req req;
146 struct zfcp_fc_gspn_rsp rsp;
147 } gspn;
148 struct {
149 struct zfcp_fc_rspn_req req;
150 struct fc_ct_hdr rsp;
151 } rspn;
152 } u;
139}; 153};
140 154
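The union above also encodes a layout trick: the fixed member sg_rsp immediately precedes the union, and the gpn_ft arm opens with sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1], so sg_rsp plus sg_rsp2 act as one contiguous scatterlist table (which is why zfcp_fc_scan_ports frees via &fc_req->sg_rsp with buf_num entries). A sketch under the assumption that no padding separates the two members:

#include <linux/scatterlist.h>

#define EXAMPLE_NUM_BUFS 4

struct example_req {
        struct scatterlist sg_rsp;      /* table entry 0 */
        union {
                struct {
                        /* entries 1 .. EXAMPLE_NUM_BUFS - 1 */
                        struct scatterlist sg_rsp2[EXAMPLE_NUM_BUFS - 1];
                } gpn_ft;
        } u;
};

static void example_init_rsp_table(struct example_req *req)
{
        /* initialize sg_rsp and u.gpn_ft.sg_rsp2 as one table */
        sg_init_table(&req->sg_rsp, EXAMPLE_NUM_BUFS);
}
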
141/** 155/**
@@ -192,14 +206,21 @@ struct zfcp_fc_wka_ports {
192 * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd 206 * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
193 * @fcp: fcp_cmnd to setup 207 * @fcp: fcp_cmnd to setup
194 * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB 208 * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
209 * @tm_flags: task management flags to setup task management command
195 */ 210 */
196static inline 211static inline
197void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) 212void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
213 u8 tm_flags)
198{ 214{
199 char tag[2]; 215 char tag[2];
200 216
201 int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); 217 int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
202 218
219 if (unlikely(tm_flags)) {
220 fcp->fc_tm_flags = tm_flags;
221 return;
222 }
223
203 if (scsi_populate_tag_msg(scsi, tag)) { 224 if (scsi_populate_tag_msg(scsi, tag)) {
204 switch (tag[0]) { 225 switch (tag[0]) {
205 case MSG_ORDERED_TAG: 226 case MSG_ORDERED_TAG:
@@ -226,19 +247,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
226} 247}
227 248
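With the merged signature above, task management becomes just another call into the same helper, taking the early-return path before any CDB or tag setup. Plausible call sites (the real callers live in zfcp_fsf.c and are not part of this hunk):

/* normal I/O: LUN, task attributes and CDB are copied from scmnd */
zfcp_fc_scsi_to_fcp(fcp, scmnd, 0);

/* task management, e.g. a LUN reset: only fc_tm_flags is set */
zfcp_fc_scsi_to_fcp(fcp, scmnd, FCP_TMF_LUN_RESET);
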
228/** 249/**
229 * zfcp_fc_fcp_tm - setup FCP command as task management command
230 * @fcp: fcp_cmnd to setup
231 * @dev: scsi_device where to send the task management command
232 * @tm: task management flags to setup tm command
233 */
234static inline
235void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
236{
237 int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
238 fcp->fc_tm_flags |= tm_flags;
239}
240
241/**
242 * zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly 250 * zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
243 * @fcp_rsp: FCP RSP IU to evaluate 251 * @fcp_rsp: FCP RSP IU to evaluate
244 * @scsi: SCSI command where to update status and sense buffer 252 * @scsi: SCSI command where to update status and sense buffer
@@ -270,7 +278,7 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
270 if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) { 278 if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
271 sense = (char *) &fcp_rsp[1]; 279 sense = (char *) &fcp_rsp[1];
272 if (rsp_flags & FCP_RSP_LEN_VAL) 280 if (rsp_flags & FCP_RSP_LEN_VAL)
273 sense += fcp_rsp->ext.fr_sns_len; 281 sense += fcp_rsp->ext.fr_rsp_len;
274 sense_len = min(fcp_rsp->ext.fr_sns_len, 282 sense_len = min(fcp_rsp->ext.fr_sns_len,
275 (u32) SCSI_SENSE_BUFFERSIZE); 283 (u32) SCSI_SENSE_BUFFERSIZE);
276 memcpy(scsi->sense_buffer, sense, sense_len); 284 memcpy(scsi->sense_buffer, sense, sense_len);
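The last hunk above is a real fix: when both response info and sense data are present in the FCP_RSP IU, the sense bytes start after fr_rsp_len bytes of response info, so advancing the pointer by the sense length itself (fr_sns_len) read from the wrong offset. A self-contained restatement of the corrected walk:

#include <scsi/fc/fc_fcp.h>

static const char *example_sense_ptr(const struct fcp_resp_with_ext *rsp,
                                     u8 rsp_flags)
{
        const char *sense = (const char *)&rsp[1];  /* past the fixed IU */

        if (rsp_flags & FCP_RSP_LEN_VAL)
                sense += rsp->ext.fr_rsp_len;   /* skip FCP_RSP_INFO */
        return sense;   /* fr_sns_len bytes of sense data follow */
}
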
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9d1d7d1842ce..022fb6a8cb83 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -18,12 +18,14 @@
18#include "zfcp_qdio.h" 18#include "zfcp_qdio.h"
19#include "zfcp_reqlist.h" 19#include "zfcp_reqlist.h"
20 20
21struct kmem_cache *zfcp_fsf_qtcb_cache;
22
21static void zfcp_fsf_request_timeout_handler(unsigned long data) 23static void zfcp_fsf_request_timeout_handler(unsigned long data)
22{ 24{
23 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 25 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
24 zfcp_qdio_siosl(adapter); 26 zfcp_qdio_siosl(adapter);
25 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 27 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
26 "fsrth_1", NULL); 28 "fsrth_1");
27} 29}
28 30
29static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, 31static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
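zfcp_fsf_qtcb_cache above replaces the cache pointer formerly buried in zfcp_data. A hedged sketch of the create/destroy lifecycle such a file-scope cache needs at module init and exit; all example_* names (and the placeholder size) are illustrative:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_qtcb {
        char raw[512];          /* placeholder for the real QTCB layout */
};

static struct kmem_cache *example_qtcb_cache;

static int __init example_init(void)
{
        example_qtcb_cache = kmem_cache_create("example_qtcb",
                                               sizeof(struct example_qtcb),
                                               8, 0, NULL);
        return example_qtcb_cache ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
        kmem_cache_destroy(example_qtcb_cache);
}

module_init(example_init);
module_exit(example_exit);
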
@@ -61,50 +63,11 @@ static u32 fsf_qtcb_type[] = {
61 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 63 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
62}; 64};
63 65
64static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
65{
66 u16 subtable = table >> 16;
67 u16 rule = table & 0xffff;
68 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
69
70 if (subtable && subtable < ARRAY_SIZE(act_type))
71 dev_warn(&adapter->ccw_device->dev,
72 "Access denied according to ACT rule type %s, "
73 "rule %d\n", act_type[subtable], rule);
74}
75
76static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
77 struct zfcp_port *port)
78{
79 struct fsf_qtcb_header *header = &req->qtcb->header;
80 dev_warn(&req->adapter->ccw_device->dev,
81 "Access denied to port 0x%016Lx\n",
82 (unsigned long long)port->wwpn);
83 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
84 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
85 zfcp_erp_port_access_denied(port, "fspad_1", req);
86 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
87}
88
89static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
90 struct zfcp_unit *unit)
91{
92 struct fsf_qtcb_header *header = &req->qtcb->header;
93 dev_warn(&req->adapter->ccw_device->dev,
94 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
95 (unsigned long long)unit->fcp_lun,
96 (unsigned long long)unit->port->wwpn);
97 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
98 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
99 zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
100 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
101}
102
103static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 66static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
104{ 67{
105 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 68 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
106 "operational because of an unsupported FC class\n"); 69 "operational because of an unsupported FC class\n");
107 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req); 70 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
108 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 71 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
109} 72}
110 73
@@ -122,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
122 } 85 }
123 86
124 if (likely(req->qtcb)) 87 if (likely(req->qtcb))
125 kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb); 88 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
126 kfree(req); 89 kfree(req);
127} 90}
128 91
@@ -137,13 +100,13 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
137 read_lock_irqsave(&adapter->port_list_lock, flags); 100 read_lock_irqsave(&adapter->port_list_lock, flags);
138 list_for_each_entry(port, &adapter->port_list, list) 101 list_for_each_entry(port, &adapter->port_list, list)
139 if (port->d_id == d_id) { 102 if (port->d_id == d_id) {
140 zfcp_erp_port_reopen(port, 0, "fssrpc1", req); 103 zfcp_erp_port_reopen(port, 0, "fssrpc1");
141 break; 104 break;
142 } 105 }
143 read_unlock_irqrestore(&adapter->port_list_lock, flags); 106 read_unlock_irqrestore(&adapter->port_list_lock, flags);
144} 107}
145 108
146static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, 109static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
147 struct fsf_link_down_info *link_down) 110 struct fsf_link_down_info *link_down)
148{ 111{
149 struct zfcp_adapter *adapter = req->adapter; 112 struct zfcp_adapter *adapter = req->adapter;
@@ -223,7 +186,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
223 "the FC fabric is down\n"); 186 "the FC fabric is down\n");
224 } 187 }
225out: 188out:
226 zfcp_erp_adapter_failed(adapter, id, req); 189 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
227} 190}
228 191
229static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 192static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
@@ -234,13 +197,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
234 197
235 switch (sr_buf->status_subtype) { 198 switch (sr_buf->status_subtype) {
236 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 199 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
237 zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi); 200 zfcp_fsf_link_down_info_eval(req, ldi);
238 break; 201 break;
239 case FSF_STATUS_READ_SUB_FDISC_FAILED: 202 case FSF_STATUS_READ_SUB_FDISC_FAILED:
240 zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi); 203 zfcp_fsf_link_down_info_eval(req, ldi);
241 break; 204 break;
242 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 205 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
243 zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL); 206 zfcp_fsf_link_down_info_eval(req, NULL);
244 }; 207 };
245} 208}
246 209
@@ -250,13 +213,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
250 struct fsf_status_read_buffer *sr_buf = req->data; 213 struct fsf_status_read_buffer *sr_buf = req->data;
251 214
252 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 215 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
253 zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf); 216 zfcp_dbf_hba_fsf_uss("fssrh_1", req);
254 mempool_free(sr_buf, adapter->pool.status_read_data); 217 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
255 zfcp_fsf_req_free(req); 218 zfcp_fsf_req_free(req);
256 return; 219 return;
257 } 220 }
258 221
259 zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf); 222 zfcp_dbf_hba_fsf_uss("fssrh_2", req);
260 223
261 switch (sr_buf->status_type) { 224 switch (sr_buf->status_type) {
262 case FSF_STATUS_READ_PORT_CLOSED: 225 case FSF_STATUS_READ_PORT_CLOSED:
@@ -271,7 +234,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
271 dev_warn(&adapter->ccw_device->dev, 234 dev_warn(&adapter->ccw_device->dev,
272 "The error threshold for checksum statistics " 235 "The error threshold for checksum statistics "
273 "has been exceeded\n"); 236 "has been exceeded\n");
274 zfcp_dbf_hba_berr(adapter->dbf, req); 237 zfcp_dbf_hba_bit_err("fssrh_3", req);
275 break; 238 break;
276 case FSF_STATUS_READ_LINK_DOWN: 239 case FSF_STATUS_READ_LINK_DOWN:
277 zfcp_fsf_status_read_link_down(req); 240 zfcp_fsf_status_read_link_down(req);
@@ -281,32 +244,30 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
281 dev_info(&adapter->ccw_device->dev, 244 dev_info(&adapter->ccw_device->dev,
282 "The local link has been restored\n"); 245 "The local link has been restored\n");
283 /* All ports should be marked as ready to run again */ 246 /* All ports should be marked as ready to run again */
284 zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL, 247 zfcp_erp_set_adapter_status(adapter,
285 ZFCP_STATUS_COMMON_RUNNING, 248 ZFCP_STATUS_COMMON_RUNNING);
286 ZFCP_SET);
287 zfcp_erp_adapter_reopen(adapter, 249 zfcp_erp_adapter_reopen(adapter,
288 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 250 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
289 ZFCP_STATUS_COMMON_ERP_FAILED, 251 ZFCP_STATUS_COMMON_ERP_FAILED,
290 "fssrh_2", req); 252 "fssrh_2");
291 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); 253 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
292 254
293 break; 255 break;
294 case FSF_STATUS_READ_NOTIFICATION_LOST: 256 case FSF_STATUS_READ_NOTIFICATION_LOST:
295 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) 257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
296 zfcp_erp_adapter_access_changed(adapter, "fssrh_3", 258 zfcp_cfdc_adapter_access_changed(adapter);
297 req);
298 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 259 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
299 queue_work(adapter->work_queue, &adapter->scan_work); 260 queue_work(adapter->work_queue, &adapter->scan_work);
300 break; 261 break;
301 case FSF_STATUS_READ_CFDC_UPDATED: 262 case FSF_STATUS_READ_CFDC_UPDATED:
302 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); 263 zfcp_cfdc_adapter_access_changed(adapter);
303 break; 264 break;
304 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 265 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
305 adapter->adapter_features = sr_buf->payload.word[0]; 266 adapter->adapter_features = sr_buf->payload.word[0];
306 break; 267 break;
307 } 268 }
308 269
309 mempool_free(sr_buf, adapter->pool.status_read_data); 270 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
310 zfcp_fsf_req_free(req); 271 zfcp_fsf_req_free(req);
311 272
312 atomic_inc(&adapter->stat_miss); 273 atomic_inc(&adapter->stat_miss);
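This hunk also shows the erp status API moving from one modify entry point steered by a SET/CLEAR flag to explicit zfcp_erp_set_*/zfcp_erp_clear_* pairs, which makes intent obvious at every call site. A minimal sketch of the split, built on the same atomic_set_mask/atomic_clear_mask primitives this diff already uses elsewhere:

#include <linux/types.h>
#include <asm/atomic.h>   /* s390 provides atomic_set_mask/atomic_clear_mask */

struct example_obj {
        atomic_t status;
};

/* after the split, direction is encoded in the function name */
static void example_set_status(struct example_obj *obj, u32 mask)
{
        atomic_set_mask(mask, &obj->status);
}

static void example_clear_status(struct example_obj *obj, u32 mask)
{
        atomic_clear_mask(mask, &obj->status);
}
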
@@ -328,7 +289,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
328 "The FCP adapter reported a problem " 289 "The FCP adapter reported a problem "
329 "that cannot be recovered\n"); 290 "that cannot be recovered\n");
330 zfcp_qdio_siosl(req->adapter); 291 zfcp_qdio_siosl(req->adapter);
331 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req); 292 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
332 break; 293 break;
333 } 294 }
334 /* all non-return stats set FSFREQ_ERROR*/ 295 /* all non-return stats set FSFREQ_ERROR*/
@@ -345,7 +306,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
345 dev_err(&req->adapter->ccw_device->dev, 306 dev_err(&req->adapter->ccw_device->dev,
346 "The FCP adapter does not recognize the command 0x%x\n", 307 "The FCP adapter does not recognize the command 0x%x\n",
347 req->qtcb->header.fsf_command); 308 req->qtcb->header.fsf_command);
348 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req); 309 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
349 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 310 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
350 break; 311 break;
351 case FSF_ADAPTER_STATUS_AVAILABLE: 312 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -376,17 +337,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
376 "QTCB version 0x%x not supported by FCP adapter " 337 "QTCB version 0x%x not supported by FCP adapter "
377 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, 338 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
378 psq->word[0], psq->word[1]); 339 psq->word[0], psq->word[1]);
379 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req); 340 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
380 break; 341 break;
381 case FSF_PROT_ERROR_STATE: 342 case FSF_PROT_ERROR_STATE:
382 case FSF_PROT_SEQ_NUMB_ERROR: 343 case FSF_PROT_SEQ_NUMB_ERROR:
383 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req); 344 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
384 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 345 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
385 break; 346 break;
386 case FSF_PROT_UNSUPP_QTCB_TYPE: 347 case FSF_PROT_UNSUPP_QTCB_TYPE:
387 dev_err(&adapter->ccw_device->dev, 348 dev_err(&adapter->ccw_device->dev,
388 "The QTCB type is not supported by the FCP adapter\n"); 349 "The QTCB type is not supported by the FCP adapter\n");
389 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req); 350 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
390 break; 351 break;
391 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 352 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
392 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 353 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
@@ -396,30 +357,28 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
396 dev_err(&adapter->ccw_device->dev, 357 dev_err(&adapter->ccw_device->dev,
397 "0x%Lx is an ambiguous request identifier\n", 358 "0x%Lx is an ambiguous request identifier\n",
398 (unsigned long long)qtcb->bottom.support.req_handle); 359 (unsigned long long)qtcb->bottom.support.req_handle);
399 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); 360 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
400 break; 361 break;
401 case FSF_PROT_LINK_DOWN: 362 case FSF_PROT_LINK_DOWN:
402 zfcp_fsf_link_down_info_eval(req, "fspse_5", 363 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
403 &psq->link_down_info);
404 /* go through reopen to flush pending requests */ 364 /* go through reopen to flush pending requests */
405 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); 365 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
406 break; 366 break;
407 case FSF_PROT_REEST_QUEUE: 367 case FSF_PROT_REEST_QUEUE:
408 /* All ports should be marked as ready to run again */ 368 /* All ports should be marked as ready to run again */
409 zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL, 369 zfcp_erp_set_adapter_status(adapter,
410 ZFCP_STATUS_COMMON_RUNNING, 370 ZFCP_STATUS_COMMON_RUNNING);
411 ZFCP_SET);
412 zfcp_erp_adapter_reopen(adapter, 371 zfcp_erp_adapter_reopen(adapter,
413 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 372 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
414 ZFCP_STATUS_COMMON_ERP_FAILED, 373 ZFCP_STATUS_COMMON_ERP_FAILED,
415 "fspse_8", req); 374 "fspse_8");
416 break; 375 break;
417 default: 376 default:
418 dev_err(&adapter->ccw_device->dev, 377 dev_err(&adapter->ccw_device->dev,
419 "0x%x is not a valid transfer protocol status\n", 378 "0x%x is not a valid transfer protocol status\n",
420 qtcb->prefix.prot_status); 379 qtcb->prefix.prot_status);
421 zfcp_qdio_siosl(adapter); 380 zfcp_qdio_siosl(adapter);
422 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req); 381 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
423 } 382 }
424 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 383 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
425} 384}
@@ -525,7 +484,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
525 dev_err(&adapter->ccw_device->dev, 484 dev_err(&adapter->ccw_device->dev,
526 "Unknown or unsupported arbitrated loop " 485 "Unknown or unsupported arbitrated loop "
527 "fibre channel topology detected\n"); 486 "fibre channel topology detected\n");
528 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req); 487 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
529 return -EIO; 488 return -EIO;
530 } 489 }
531 490
@@ -561,7 +520,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
561 "FCP adapter maximum QTCB size (%d bytes) " 520 "FCP adapter maximum QTCB size (%d bytes) "
562 "is too small\n", 521 "is too small\n",
563 bottom->max_qtcb_size); 522 bottom->max_qtcb_size);
564 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req); 523 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
565 return; 524 return;
566 } 525 }
567 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 526 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -575,14 +534,11 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
575 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 534 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
576 adapter->hydra_version = 0; 535 adapter->hydra_version = 0;
577 536
578 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 537 zfcp_fsf_link_down_info_eval(req,
579 &adapter->status);
580
581 zfcp_fsf_link_down_info_eval(req, "fsecdh2",
582 &qtcb->header.fsf_status_qual.link_down_info); 538 &qtcb->header.fsf_status_qual.link_down_info);
583 break; 539 break;
584 default: 540 default:
585 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req); 541 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
586 return; 542 return;
587 } 543 }
588 544
@@ -598,14 +554,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
598 dev_err(&adapter->ccw_device->dev, 554 dev_err(&adapter->ccw_device->dev,
599 "The FCP adapter only supports newer " 555 "The FCP adapter only supports newer "
600 "control block versions\n"); 556 "control block versions\n");
601 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req); 557 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
602 return; 558 return;
603 } 559 }
604 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 560 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
605 dev_err(&adapter->ccw_device->dev, 561 dev_err(&adapter->ccw_device->dev,
606 "The FCP adapter only supports older " 562 "The FCP adapter only supports older "
607 "control block versions\n"); 563 "control block versions\n");
608 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req); 564 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
609 } 565 }
610} 566}
611 567
@@ -644,7 +600,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
644 break; 600 break;
645 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 601 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
646 zfcp_fsf_exchange_port_evaluate(req); 602 zfcp_fsf_exchange_port_evaluate(req);
647 zfcp_fsf_link_down_info_eval(req, "fsepdh1", 603 zfcp_fsf_link_down_info_eval(req,
648 &qtcb->header.fsf_status_qual.link_down_info); 604 &qtcb->header.fsf_status_qual.link_down_info);
649 break; 605 break;
650 } 606 }
@@ -674,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
674 if (likely(pool)) 630 if (likely(pool))
675 qtcb = mempool_alloc(pool, GFP_ATOMIC); 631 qtcb = mempool_alloc(pool, GFP_ATOMIC);
676 else 632 else
677 qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC); 633 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
678 634
679 if (unlikely(!qtcb)) 635 if (unlikely(!qtcb))
680 return NULL; 636 return NULL;
@@ -684,7 +640,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
684} 640}
685 641
686static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, 642static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
687 u32 fsf_cmd, u32 sbtype, 643 u32 fsf_cmd, u8 sbtype,
688 mempool_t *pool) 644 mempool_t *pool)
689{ 645{
690 struct zfcp_adapter *adapter = qdio->adapter; 646 struct zfcp_adapter *adapter = qdio->adapter;
@@ -746,7 +702,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
746 del_timer(&req->timer); 702 del_timer(&req->timer);
747 /* lookup request again, list might have changed */ 703 /* lookup request again, list might have changed */
748 zfcp_reqlist_find_rm(adapter->req_list, req_id); 704 zfcp_reqlist_find_rm(adapter->req_list, req_id);
749 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req); 705 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
750 return -EIO; 706 return -EIO;
751 } 707 }
752 708
@@ -769,9 +725,10 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
769 struct zfcp_adapter *adapter = qdio->adapter; 725 struct zfcp_adapter *adapter = qdio->adapter;
770 struct zfcp_fsf_req *req; 726 struct zfcp_fsf_req *req;
771 struct fsf_status_read_buffer *sr_buf; 727 struct fsf_status_read_buffer *sr_buf;
728 struct page *page;
772 int retval = -EIO; 729 int retval = -EIO;
773 730
774 spin_lock_bh(&qdio->req_q_lock); 731 spin_lock_irq(&qdio->req_q_lock);
775 if (zfcp_qdio_sbal_get(qdio)) 732 if (zfcp_qdio_sbal_get(qdio))
776 goto out; 733 goto out;
777 734
@@ -782,11 +739,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
782 goto out; 739 goto out;
783 } 740 }
784 741
785 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); 742 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
786 if (!sr_buf) { 743 if (!page) {
787 retval = -ENOMEM; 744 retval = -ENOMEM;
788 goto failed_buf; 745 goto failed_buf;
789 } 746 }
747 sr_buf = page_address(page);
790 memset(sr_buf, 0, sizeof(*sr_buf)); 748 memset(sr_buf, 0, sizeof(*sr_buf));
791 req->data = sr_buf; 749 req->data = sr_buf;
792 750
@@ -800,18 +758,20 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
800 goto out; 758 goto out;
801 759
802failed_req_send: 760failed_req_send:
803 mempool_free(sr_buf, adapter->pool.status_read_data); 761 req->data = NULL;
762 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
804failed_buf: 763failed_buf:
764 zfcp_dbf_hba_fsf_uss("fssr__1", req);
805 zfcp_fsf_req_free(req); 765 zfcp_fsf_req_free(req);
806 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
807out: 766out:
808 spin_unlock_bh(&qdio->req_q_lock); 767 spin_unlock_irq(&qdio->req_q_lock);
809 return retval; 768 return retval;
810} 769}
811 770
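zfcp_fsf_status_read above switches the status-read pool from object-sized buffers to whole pages: the pool hands out struct page pointers, page_address() yields the usable buffer, and freeing converts back with virt_to_page(). A hedged sketch of the idiom; example_pool is illustrative, zfcp keeps its pool in adapter->pool.sr_data:

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/mm.h>

static void *example_buf_alloc(mempool_t *pool)
{
        struct page *page = mempool_alloc(pool, GFP_ATOMIC);

        return page ? page_address(page) : NULL;
}

static void example_buf_free(mempool_t *pool, void *buf)
{
        mempool_free(virt_to_page(buf), pool);
}

/* pool creation at setup time, handing out order-0 pages: */
/* pool = mempool_create_page_pool(min_nr, 0); */
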
812static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 771static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
813{ 772{
814 struct zfcp_unit *unit = req->data; 773 struct scsi_device *sdev = req->data;
774 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
815 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 775 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
816 776
817 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 777 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -820,14 +780,14 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
820 switch (req->qtcb->header.fsf_status) { 780 switch (req->qtcb->header.fsf_status) {
821 case FSF_PORT_HANDLE_NOT_VALID: 781 case FSF_PORT_HANDLE_NOT_VALID:
822 if (fsq->word[0] == fsq->word[1]) { 782 if (fsq->word[0] == fsq->word[1]) {
823 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 783 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
824 "fsafch1", req); 784 "fsafch1");
825 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 785 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
826 } 786 }
827 break; 787 break;
828 case FSF_LUN_HANDLE_NOT_VALID: 788 case FSF_LUN_HANDLE_NOT_VALID:
829 if (fsq->word[0] == fsq->word[1]) { 789 if (fsq->word[0] == fsq->word[1]) {
830 zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req); 790 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
831 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 791 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
832 } 792 }
833 break; 793 break;
@@ -835,17 +795,22 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
835 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 795 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
836 break; 796 break;
837 case FSF_PORT_BOXED: 797 case FSF_PORT_BOXED:
838 zfcp_erp_port_boxed(unit->port, "fsafch3", req); 798 zfcp_erp_set_port_status(zfcp_sdev->port,
799 ZFCP_STATUS_COMMON_ACCESS_BOXED);
800 zfcp_erp_port_reopen(zfcp_sdev->port,
801 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
839 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 802 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
840 break; 803 break;
841 case FSF_LUN_BOXED: 804 case FSF_LUN_BOXED:
842 zfcp_erp_unit_boxed(unit, "fsafch4", req); 805 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
806 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
807 "fsafch4");
843 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 808 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
844 break; 809 break;
845 case FSF_ADAPTER_STATUS_AVAILABLE: 810 case FSF_ADAPTER_STATUS_AVAILABLE:
846 switch (fsq->word[0]) { 811 switch (fsq->word[0]) {
847 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 812 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
848 zfcp_fc_test_link(unit->port); 813 zfcp_fc_test_link(zfcp_sdev->port);
849 /* fall through */ 814 /* fall through */
850 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 815 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
851 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 816 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -859,39 +824,40 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
859} 824}
860 825
861/** 826/**
862 * zfcp_fsf_abort_fcp_command - abort running SCSI command 827 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
863 * @old_req_id: unsigned long 828 * @scmnd: The SCSI command to abort
864 * @unit: pointer to struct zfcp_unit
865 * Returns: pointer to struct zfcp_fsf_req 829 * Returns: pointer to struct zfcp_fsf_req
866 */ 830 */
867 831
868struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, 832struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
869 struct zfcp_unit *unit)
870{ 833{
871 struct zfcp_fsf_req *req = NULL; 834 struct zfcp_fsf_req *req = NULL;
872 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 835 struct scsi_device *sdev = scmnd->device;
836 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
837 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
838 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
873 839
874 spin_lock_bh(&qdio->req_q_lock); 840 spin_lock_irq(&qdio->req_q_lock);
875 if (zfcp_qdio_sbal_get(qdio)) 841 if (zfcp_qdio_sbal_get(qdio))
876 goto out; 842 goto out;
877 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
878 SBAL_FLAGS0_TYPE_READ, 844 SBAL_SFLAGS0_TYPE_READ,
879 qdio->adapter->pool.scsi_abort); 845 qdio->adapter->pool.scsi_abort);
880 if (IS_ERR(req)) { 846 if (IS_ERR(req)) {
881 req = NULL; 847 req = NULL;
882 goto out; 848 goto out;
883 } 849 }
884 850
885 if (unlikely(!(atomic_read(&unit->status) & 851 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
886 ZFCP_STATUS_COMMON_UNBLOCKED))) 852 ZFCP_STATUS_COMMON_UNBLOCKED)))
887 goto out_error_free; 853 goto out_error_free;
888 854
889 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 855 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
890 856
891 req->data = unit; 857 req->data = sdev;
892 req->handler = zfcp_fsf_abort_fcp_command_handler; 858 req->handler = zfcp_fsf_abort_fcp_command_handler;
893 req->qtcb->header.lun_handle = unit->handle; 859 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
894 req->qtcb->header.port_handle = unit->port->handle; 860 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
895 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 861 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
896 862
897 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 863 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
@@ -902,7 +868,7 @@ out_error_free:
902 zfcp_fsf_req_free(req); 868 zfcp_fsf_req_free(req);
903 req = NULL; 869 req = NULL;
904out: 870out:
905 spin_unlock_bh(&qdio->req_q_lock); 871 spin_unlock_irq(&qdio->req_q_lock);
906 return req; 872 return req;
907} 873}
908 874
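The rewritten zfcp_fsf_abort_fcp_cmnd recovers the identifier of the request to abort from scmnd->host_scribble, where the I/O submission path stashed it. host_scribble is an unsigned char * reserved for the low-level driver; a sketch of the stash/recover pair:

#include <scsi/scsi_cmnd.h>

/* at submission time: remember which FSF request carries this command */
static void example_stash_req_id(struct scsi_cmnd *scmnd,
                                 unsigned long req_id)
{
        scmnd->host_scribble = (unsigned char *)req_id;
}

/* at abort time: recover the identifier of the in-flight request */
static unsigned long example_recover_req_id(struct scsi_cmnd *scmnd)
{
        return (unsigned long)scmnd->host_scribble;
}
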
@@ -919,7 +885,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
919 885
920 switch (header->fsf_status) { 886 switch (header->fsf_status) {
921 case FSF_GOOD: 887 case FSF_GOOD:
922 zfcp_dbf_san_ct_response(req); 888 zfcp_dbf_san_res("fsscth1", req);
923 ct->status = 0; 889 ct->status = 0;
924 break; 890 break;
925 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 891 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -939,7 +905,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
939 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 905 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
940 break; 906 break;
941 case FSF_PORT_HANDLE_NOT_VALID: 907 case FSF_PORT_HANDLE_NOT_VALID:
942 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); 908 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
943 /* fall through */ 909 /* fall through */
944 case FSF_GENERIC_COMMAND_REJECTED: 910 case FSF_GENERIC_COMMAND_REJECTED:
945 case FSF_PAYLOAD_SIZE_MISMATCH: 911 case FSF_PAYLOAD_SIZE_MISMATCH:
@@ -1041,12 +1007,12 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1041 struct zfcp_fsf_req *req; 1007 struct zfcp_fsf_req *req;
1042 int ret = -EIO; 1008 int ret = -EIO;
1043 1009
1044 spin_lock_bh(&qdio->req_q_lock); 1010 spin_lock_irq(&qdio->req_q_lock);
1045 if (zfcp_qdio_sbal_get(qdio)) 1011 if (zfcp_qdio_sbal_get(qdio))
1046 goto out; 1012 goto out;
1047 1013
1048 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, 1014 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1049 SBAL_FLAGS0_TYPE_WRITE_READ, pool); 1015 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1050 1016
1051 if (IS_ERR(req)) { 1017 if (IS_ERR(req)) {
1052 ret = PTR_ERR(req); 1018 ret = PTR_ERR(req);
@@ -1062,7 +1028,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1062 req->qtcb->header.port_handle = wka_port->handle; 1028 req->qtcb->header.port_handle = wka_port->handle;
1063 req->data = ct; 1029 req->data = ct;
1064 1030
1065 zfcp_dbf_san_ct_request(req, wka_port->d_id); 1031 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1066 1032
1067 ret = zfcp_fsf_req_send(req); 1033 ret = zfcp_fsf_req_send(req);
1068 if (ret) 1034 if (ret)
@@ -1073,7 +1039,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1073failed_send: 1039failed_send:
1074 zfcp_fsf_req_free(req); 1040 zfcp_fsf_req_free(req);
1075out: 1041out:
1076 spin_unlock_bh(&qdio->req_q_lock); 1042 spin_unlock_irq(&qdio->req_q_lock);
1077 return ret; 1043 return ret;
1078} 1044}
1079 1045
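Throughout this file the req_q_lock sections convert from spin_lock_bh to spin_lock_irq, meaning the queue lock is now taken with hard interrupts disabled. A short reminder of the pairing rules with a hypothetical lock: the plain _irq variants are only safe where interrupts are known enabled, while _irqsave works from any context:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_process_context(void)
{
        /* interrupts are known enabled here, plain _irq is safe */
        spin_lock_irq(&example_lock);
        /* ... manipulate the request queue ... */
        spin_unlock_irq(&example_lock);
}

static void example_any_context(void)
{
        unsigned long flags;

        /* _irqsave preserves whatever interrupt state already held */
        spin_lock_irqsave(&example_lock, flags);
        /* ... */
        spin_unlock_irqrestore(&example_lock, flags);
}
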
@@ -1090,7 +1056,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1090 1056
1091 switch (header->fsf_status) { 1057 switch (header->fsf_status) {
1092 case FSF_GOOD: 1058 case FSF_GOOD:
1093 zfcp_dbf_san_els_response(req); 1059 zfcp_dbf_san_res("fsselh1", req);
1094 send_els->status = 0; 1060 send_els->status = 0;
1095 break; 1061 break;
1096 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1062 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1111,11 +1077,13 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1111 case FSF_RESPONSE_SIZE_TOO_LARGE: 1077 case FSF_RESPONSE_SIZE_TOO_LARGE:
1112 break; 1078 break;
1113 case FSF_ACCESS_DENIED: 1079 case FSF_ACCESS_DENIED:
1114 if (port) 1080 if (port) {
1115 zfcp_fsf_access_denied_port(req, port); 1081 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1082 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1083 }
1116 break; 1084 break;
1117 case FSF_SBAL_MISMATCH: 1085 case FSF_SBAL_MISMATCH:
1118 /* should never occure, avoided in zfcp_fsf_send_els */ 1086 /* should never occur, avoided in zfcp_fsf_send_els */
1119 /* fall through */ 1087 /* fall through */
1120 default: 1088 default:
1121 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1089 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1137,12 +1105,12 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1137 struct zfcp_qdio *qdio = adapter->qdio; 1105 struct zfcp_qdio *qdio = adapter->qdio;
1138 int ret = -EIO; 1106 int ret = -EIO;
1139 1107
1140 spin_lock_bh(&qdio->req_q_lock); 1108 spin_lock_irq(&qdio->req_q_lock);
1141 if (zfcp_qdio_sbal_get(qdio)) 1109 if (zfcp_qdio_sbal_get(qdio))
1142 goto out; 1110 goto out;
1143 1111
1144 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, 1112 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1145 SBAL_FLAGS0_TYPE_WRITE_READ, NULL); 1113 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1146 1114
1147 if (IS_ERR(req)) { 1115 if (IS_ERR(req)) {
1148 ret = PTR_ERR(req); 1116 ret = PTR_ERR(req);
@@ -1162,7 +1130,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1162 req->handler = zfcp_fsf_send_els_handler; 1130 req->handler = zfcp_fsf_send_els_handler;
1163 req->data = els; 1131 req->data = els;
1164 1132
1165 zfcp_dbf_san_els_request(req); 1133 zfcp_dbf_san_req("fssels1", req, d_id);
1166 1134
1167 ret = zfcp_fsf_req_send(req); 1135 ret = zfcp_fsf_req_send(req);
1168 if (ret) 1136 if (ret)
@@ -1173,7 +1141,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1173failed_send: 1141failed_send:
1174 zfcp_fsf_req_free(req); 1142 zfcp_fsf_req_free(req);
1175out: 1143out:
1176 spin_unlock_bh(&qdio->req_q_lock); 1144 spin_unlock_irq(&qdio->req_q_lock);
1177 return ret; 1145 return ret;
1178} 1146}
1179 1147
@@ -1183,12 +1151,12 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1183 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1151 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1184 int retval = -EIO; 1152 int retval = -EIO;
1185 1153
1186 spin_lock_bh(&qdio->req_q_lock); 1154 spin_lock_irq(&qdio->req_q_lock);
1187 if (zfcp_qdio_sbal_get(qdio)) 1155 if (zfcp_qdio_sbal_get(qdio))
1188 goto out; 1156 goto out;
1189 1157
1190 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1158 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1191 SBAL_FLAGS0_TYPE_READ, 1159 SBAL_SFLAGS0_TYPE_READ,
1192 qdio->adapter->pool.erp_req); 1160 qdio->adapter->pool.erp_req);
1193 1161
1194 if (IS_ERR(req)) { 1162 if (IS_ERR(req)) {
@@ -1215,7 +1183,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1215 erp_action->fsf_req_id = 0; 1183 erp_action->fsf_req_id = 0;
1216 } 1184 }
1217out: 1185out:
1218 spin_unlock_bh(&qdio->req_q_lock); 1186 spin_unlock_irq(&qdio->req_q_lock);
1219 return retval; 1187 return retval;
1220} 1188}
1221 1189
@@ -1225,12 +1193,12 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1225 struct zfcp_fsf_req *req = NULL; 1193 struct zfcp_fsf_req *req = NULL;
1226 int retval = -EIO; 1194 int retval = -EIO;
1227 1195
1228 spin_lock_bh(&qdio->req_q_lock); 1196 spin_lock_irq(&qdio->req_q_lock);
1229 if (zfcp_qdio_sbal_get(qdio)) 1197 if (zfcp_qdio_sbal_get(qdio))
1230 goto out_unlock; 1198 goto out_unlock;
1231 1199
1232 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1200 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1233 SBAL_FLAGS0_TYPE_READ, NULL); 1201 SBAL_SFLAGS0_TYPE_READ, NULL);
1234 1202
1235 if (IS_ERR(req)) { 1203 if (IS_ERR(req)) {
1236 retval = PTR_ERR(req); 1204 retval = PTR_ERR(req);
@@ -1251,7 +1219,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1251 1219
1252 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1220 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1253 retval = zfcp_fsf_req_send(req); 1221 retval = zfcp_fsf_req_send(req);
1254 spin_unlock_bh(&qdio->req_q_lock); 1222 spin_unlock_irq(&qdio->req_q_lock);
1255 if (!retval) 1223 if (!retval)
1256 wait_for_completion(&req->completion); 1224 wait_for_completion(&req->completion);
1257 1225
@@ -1259,7 +1227,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1259 return retval; 1227 return retval;
1260 1228
1261out_unlock: 1229out_unlock:
1262 spin_unlock_bh(&qdio->req_q_lock); 1230 spin_unlock_irq(&qdio->req_q_lock);
1263 return retval; 1231 return retval;
1264} 1232}
1265 1233
@@ -1277,12 +1245,12 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1277 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1245 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1278 return -EOPNOTSUPP; 1246 return -EOPNOTSUPP;
1279 1247
1280 spin_lock_bh(&qdio->req_q_lock); 1248 spin_lock_irq(&qdio->req_q_lock);
1281 if (zfcp_qdio_sbal_get(qdio)) 1249 if (zfcp_qdio_sbal_get(qdio))
1282 goto out; 1250 goto out;
1283 1251
1284 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1252 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1285 SBAL_FLAGS0_TYPE_READ, 1253 SBAL_SFLAGS0_TYPE_READ,
1286 qdio->adapter->pool.erp_req); 1254 qdio->adapter->pool.erp_req);
1287 1255
1288 if (IS_ERR(req)) { 1256 if (IS_ERR(req)) {
@@ -1304,7 +1272,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1304 erp_action->fsf_req_id = 0; 1272 erp_action->fsf_req_id = 0;
1305 } 1273 }
1306out: 1274out:
1307 spin_unlock_bh(&qdio->req_q_lock); 1275 spin_unlock_irq(&qdio->req_q_lock);
1308 return retval; 1276 return retval;
1309} 1277}
1310 1278
@@ -1323,12 +1291,12 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1323 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1291 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1324 return -EOPNOTSUPP; 1292 return -EOPNOTSUPP;
1325 1293
1326 spin_lock_bh(&qdio->req_q_lock); 1294 spin_lock_irq(&qdio->req_q_lock);
1327 if (zfcp_qdio_sbal_get(qdio)) 1295 if (zfcp_qdio_sbal_get(qdio))
1328 goto out_unlock; 1296 goto out_unlock;
1329 1297
1330 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1298 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1331 SBAL_FLAGS0_TYPE_READ, NULL); 1299 SBAL_SFLAGS0_TYPE_READ, NULL);
1332 1300
1333 if (IS_ERR(req)) { 1301 if (IS_ERR(req)) {
1334 retval = PTR_ERR(req); 1302 retval = PTR_ERR(req);
@@ -1343,7 +1311,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1343 req->handler = zfcp_fsf_exchange_port_data_handler; 1311 req->handler = zfcp_fsf_exchange_port_data_handler;
1344 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1312 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1345 retval = zfcp_fsf_req_send(req); 1313 retval = zfcp_fsf_req_send(req);
1346 spin_unlock_bh(&qdio->req_q_lock); 1314 spin_unlock_irq(&qdio->req_q_lock);
1347 1315
1348 if (!retval) 1316 if (!retval)
1349 wait_for_completion(&req->completion); 1317 wait_for_completion(&req->completion);
@@ -1353,7 +1321,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1353 return retval; 1321 return retval;
1354 1322
1355out_unlock: 1323out_unlock:
1356 spin_unlock_bh(&qdio->req_q_lock); 1324 spin_unlock_irq(&qdio->req_q_lock);
1357 return retval; 1325 return retval;
1358} 1326}
1359 1327
@@ -1370,14 +1338,16 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1370 case FSF_PORT_ALREADY_OPEN: 1338 case FSF_PORT_ALREADY_OPEN:
1371 break; 1339 break;
1372 case FSF_ACCESS_DENIED: 1340 case FSF_ACCESS_DENIED:
1373 zfcp_fsf_access_denied_port(req, port); 1341 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1342 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1374 break; 1343 break;
1375 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1344 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1376 dev_warn(&req->adapter->ccw_device->dev, 1345 dev_warn(&req->adapter->ccw_device->dev,
1377 "Not enough FCP adapter resources to open " 1346 "Not enough FCP adapter resources to open "
1378 "remote port 0x%016Lx\n", 1347 "remote port 0x%016Lx\n",
1379 (unsigned long long)port->wwpn); 1348 (unsigned long long)port->wwpn);
1380 zfcp_erp_port_failed(port, "fsoph_1", req); 1349 zfcp_erp_set_port_status(port,
1350 ZFCP_STATUS_COMMON_ERP_FAILED);
1381 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1351 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1382 break; 1352 break;
1383 case FSF_ADAPTER_STATUS_AVAILABLE: 1353 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1437,12 +1407,12 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1437 struct zfcp_fsf_req *req; 1407 struct zfcp_fsf_req *req;
1438 int retval = -EIO; 1408 int retval = -EIO;
1439 1409
1440 spin_lock_bh(&qdio->req_q_lock); 1410 spin_lock_irq(&qdio->req_q_lock);
1441 if (zfcp_qdio_sbal_get(qdio)) 1411 if (zfcp_qdio_sbal_get(qdio))
1442 goto out; 1412 goto out;
1443 1413
1444 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1414 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1445 SBAL_FLAGS0_TYPE_READ, 1415 SBAL_SFLAGS0_TYPE_READ,
1446 qdio->adapter->pool.erp_req); 1416 qdio->adapter->pool.erp_req);
1447 1417
1448 if (IS_ERR(req)) { 1418 if (IS_ERR(req)) {
@@ -1468,7 +1438,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1468 put_device(&port->dev); 1438 put_device(&port->dev);
1469 } 1439 }
1470out: 1440out:
1471 spin_unlock_bh(&qdio->req_q_lock); 1441 spin_unlock_irq(&qdio->req_q_lock);
1472 return retval; 1442 return retval;
1473} 1443}
1474 1444
@@ -1481,15 +1451,13 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1481 1451
1482 switch (req->qtcb->header.fsf_status) { 1452 switch (req->qtcb->header.fsf_status) {
1483 case FSF_PORT_HANDLE_NOT_VALID: 1453 case FSF_PORT_HANDLE_NOT_VALID:
1484 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req); 1454 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1485 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1455 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1486 break; 1456 break;
1487 case FSF_ADAPTER_STATUS_AVAILABLE: 1457 case FSF_ADAPTER_STATUS_AVAILABLE:
1488 break; 1458 break;
1489 case FSF_GOOD: 1459 case FSF_GOOD:
1490 zfcp_erp_modify_port_status(port, "fscph_2", req, 1460 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1491 ZFCP_STATUS_COMMON_OPEN,
1492 ZFCP_CLEAR);
1493 break; 1461 break;
1494 } 1462 }
1495} 1463}
@@ -1505,12 +1473,12 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1505 struct zfcp_fsf_req *req; 1473 struct zfcp_fsf_req *req;
1506 int retval = -EIO; 1474 int retval = -EIO;
1507 1475
1508 spin_lock_bh(&qdio->req_q_lock); 1476 spin_lock_irq(&qdio->req_q_lock);
1509 if (zfcp_qdio_sbal_get(qdio)) 1477 if (zfcp_qdio_sbal_get(qdio))
1510 goto out; 1478 goto out;
1511 1479
1512 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1480 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1513 SBAL_FLAGS0_TYPE_READ, 1481 SBAL_SFLAGS0_TYPE_READ,
1514 qdio->adapter->pool.erp_req); 1482 qdio->adapter->pool.erp_req);
1515 1483
1516 if (IS_ERR(req)) { 1484 if (IS_ERR(req)) {
@@ -1534,7 +1502,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1534 erp_action->fsf_req_id = 0; 1502 erp_action->fsf_req_id = 0;
1535 } 1503 }
1536out: 1504out:
1537 spin_unlock_bh(&qdio->req_q_lock); 1505 spin_unlock_irq(&qdio->req_q_lock);
1538 return retval; 1506 return retval;
1539} 1507}
1540 1508
@@ -1580,15 +1548,15 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1580 struct zfcp_fsf_req *req; 1548 struct zfcp_fsf_req *req;
1581 int retval = -EIO; 1549 int retval = -EIO;
1582 1550
1583 spin_lock_bh(&qdio->req_q_lock); 1551 spin_lock_irq(&qdio->req_q_lock);
1584 if (zfcp_qdio_sbal_get(qdio)) 1552 if (zfcp_qdio_sbal_get(qdio))
1585 goto out; 1553 goto out;
1586 1554
1587 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1555 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1588 SBAL_FLAGS0_TYPE_READ, 1556 SBAL_SFLAGS0_TYPE_READ,
1589 qdio->adapter->pool.erp_req); 1557 qdio->adapter->pool.erp_req);
1590 1558
1591 if (unlikely(IS_ERR(req))) { 1559 if (IS_ERR(req)) {
1592 retval = PTR_ERR(req); 1560 retval = PTR_ERR(req);
1593 goto out; 1561 goto out;
1594 } 1562 }
@@ -1605,7 +1573,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1605 if (retval) 1573 if (retval)
1606 zfcp_fsf_req_free(req); 1574 zfcp_fsf_req_free(req);
1607out: 1575out:
1608 spin_unlock_bh(&qdio->req_q_lock); 1576 spin_unlock_irq(&qdio->req_q_lock);
1609 return retval; 1577 return retval;
1610} 1578}
1611 1579
@@ -1615,7 +1583,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1615 1583
1616 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { 1584 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1617 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1585 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1618 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req); 1586 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1619 } 1587 }
1620 1588
1621 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; 1589 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
@@ -1633,15 +1601,15 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1633 struct zfcp_fsf_req *req; 1601 struct zfcp_fsf_req *req;
1634 int retval = -EIO; 1602 int retval = -EIO;
1635 1603
1636 spin_lock_bh(&qdio->req_q_lock); 1604 spin_lock_irq(&qdio->req_q_lock);
1637 if (zfcp_qdio_sbal_get(qdio)) 1605 if (zfcp_qdio_sbal_get(qdio))
1638 goto out; 1606 goto out;
1639 1607
1640 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1608 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1641 SBAL_FLAGS0_TYPE_READ, 1609 SBAL_SFLAGS0_TYPE_READ,
1642 qdio->adapter->pool.erp_req); 1610 qdio->adapter->pool.erp_req);
1643 1611
1644 if (unlikely(IS_ERR(req))) { 1612 if (IS_ERR(req)) {
1645 retval = PTR_ERR(req); 1613 retval = PTR_ERR(req);
1646 goto out; 1614 goto out;
1647 } 1615 }
@@ -1658,7 +1626,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1658 if (retval) 1626 if (retval)
1659 zfcp_fsf_req_free(req); 1627 zfcp_fsf_req_free(req);
1660out: 1628out:
1661 spin_unlock_bh(&qdio->req_q_lock); 1629 spin_unlock_irq(&qdio->req_q_lock);
1662 return retval; 1630 return retval;
1663} 1631}
1664 1632
@@ -1666,29 +1634,30 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1666{ 1634{
1667 struct zfcp_port *port = req->data; 1635 struct zfcp_port *port = req->data;
1668 struct fsf_qtcb_header *header = &req->qtcb->header; 1636 struct fsf_qtcb_header *header = &req->qtcb->header;
1669 struct zfcp_unit *unit; 1637 struct scsi_device *sdev;
1670 1638
1671 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1639 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1672 return; 1640 return;
1673 1641
1674 switch (header->fsf_status) { 1642 switch (header->fsf_status) {
1675 case FSF_PORT_HANDLE_NOT_VALID: 1643 case FSF_PORT_HANDLE_NOT_VALID:
1676 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req); 1644 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1677 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1645 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1678 break; 1646 break;
1679 case FSF_ACCESS_DENIED: 1647 case FSF_ACCESS_DENIED:
1680 zfcp_fsf_access_denied_port(req, port); 1648 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1681 break; 1649 break;
1682 case FSF_PORT_BOXED: 1650 case FSF_PORT_BOXED:
1683 /* can't use generic zfcp_erp_modify_port_status because 1651 /* can't use generic zfcp_erp_modify_port_status because
1684 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1652 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1685 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1653 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1686 read_lock(&port->unit_list_lock); 1654 shost_for_each_device(sdev, port->adapter->scsi_host)
1687 list_for_each_entry(unit, &port->unit_list, list) 1655 if (sdev_to_zfcp(sdev)->port == port)
1688 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1656 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1689 &unit->status); 1657 &sdev_to_zfcp(sdev)->status);
1690 read_unlock(&port->unit_list_lock); 1658 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1691 zfcp_erp_port_boxed(port, "fscpph2", req); 1659 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1660 "fscpph2");
1692 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1661 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1693 break; 1662 break;
1694 case FSF_ADAPTER_STATUS_AVAILABLE: 1663 case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1705,11 +1674,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1705 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1674 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1706 */ 1675 */
1707 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1676 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1708 read_lock(&port->unit_list_lock); 1677 shost_for_each_device(sdev, port->adapter->scsi_host)
1709 list_for_each_entry(unit, &port->unit_list, list) 1678 if (sdev_to_zfcp(sdev)->port == port)
1710 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1679 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1711 &unit->status); 1680 &sdev_to_zfcp(sdev)->status);
1712 read_unlock(&port->unit_list_lock);
1713 break; 1681 break;
1714 } 1682 }
1715} 1683}
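The two hunks above replace the driver-private unit-list walk with an iteration over the SCSI host's device list; shost_for_each_device() takes and drops a reference on each scsi_device internally, which is why the unit_list_lock read lock disappears. Sketch of the pattern, using only helpers visible in this patch:

	struct scsi_device *sdev;

	shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port)
			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
					  &sdev_to_zfcp(sdev)->status);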
@@ -1725,12 +1693,12 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1725 struct zfcp_fsf_req *req; 1693 struct zfcp_fsf_req *req;
1726 int retval = -EIO; 1694 int retval = -EIO;
1727 1695
1728 spin_lock_bh(&qdio->req_q_lock); 1696 spin_lock_irq(&qdio->req_q_lock);
1729 if (zfcp_qdio_sbal_get(qdio)) 1697 if (zfcp_qdio_sbal_get(qdio))
1730 goto out; 1698 goto out;
1731 1699
1732 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, 1700 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1733 SBAL_FLAGS0_TYPE_READ, 1701 SBAL_SFLAGS0_TYPE_READ,
1734 qdio->adapter->pool.erp_req); 1702 qdio->adapter->pool.erp_req);
1735 1703
1736 if (IS_ERR(req)) { 1704 if (IS_ERR(req)) {
@@ -1754,69 +1722,56 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1754 erp_action->fsf_req_id = 0; 1722 erp_action->fsf_req_id = 0;
1755 } 1723 }
1756out: 1724out:
1757 spin_unlock_bh(&qdio->req_q_lock); 1725 spin_unlock_irq(&qdio->req_q_lock);
1758 return retval; 1726 return retval;
1759} 1727}
1760 1728
1761static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) 1729static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1762{ 1730{
1763 struct zfcp_adapter *adapter = req->adapter; 1731 struct zfcp_adapter *adapter = req->adapter;
1764 struct zfcp_unit *unit = req->data; 1732 struct scsi_device *sdev = req->data;
1733 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1765 struct fsf_qtcb_header *header = &req->qtcb->header; 1734 struct fsf_qtcb_header *header = &req->qtcb->header;
1766 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; 1735 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1767 struct fsf_queue_designator *queue_designator =
1768 &header->fsf_status_qual.fsf_queue_designator;
1769 int exclusive, readwrite;
1770 1736
1771 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1737 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1772 return; 1738 return;
1773 1739
1774 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1740 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1775 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1741 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1776 ZFCP_STATUS_UNIT_SHARED | 1742 ZFCP_STATUS_LUN_SHARED |
1777 ZFCP_STATUS_UNIT_READONLY, 1743 ZFCP_STATUS_LUN_READONLY,
1778 &unit->status); 1744 &zfcp_sdev->status);
1779 1745
1780 switch (header->fsf_status) { 1746 switch (header->fsf_status) {
1781 1747
1782 case FSF_PORT_HANDLE_NOT_VALID: 1748 case FSF_PORT_HANDLE_NOT_VALID:
1783 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req); 1749 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1784 /* fall through */ 1750 /* fall through */
1785 case FSF_LUN_ALREADY_OPEN: 1751 case FSF_LUN_ALREADY_OPEN:
1786 break; 1752 break;
1787 case FSF_ACCESS_DENIED: 1753 case FSF_ACCESS_DENIED:
1788 zfcp_fsf_access_denied_unit(req, unit); 1754 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
1789 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); 1755 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1790 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1791 break; 1756 break;
1792 case FSF_PORT_BOXED: 1757 case FSF_PORT_BOXED:
1793 zfcp_erp_port_boxed(unit->port, "fsouh_2", req); 1758 zfcp_erp_set_port_status(zfcp_sdev->port,
1759 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1760 zfcp_erp_port_reopen(zfcp_sdev->port,
1761 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1794 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1762 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1795 break; 1763 break;
1796 case FSF_LUN_SHARING_VIOLATION: 1764 case FSF_LUN_SHARING_VIOLATION:
1797 if (header->fsf_status_qual.word[0]) 1765 zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
1798 dev_warn(&adapter->ccw_device->dev,
1799 "LUN 0x%Lx on port 0x%Lx is already in "
1800 "use by CSS%d, MIF Image ID %x\n",
1801 (unsigned long long)unit->fcp_lun,
1802 (unsigned long long)unit->port->wwpn,
1803 queue_designator->cssid,
1804 queue_designator->hla);
1805 else
1806 zfcp_act_eval_err(adapter,
1807 header->fsf_status_qual.word[2]);
1808 zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
1809 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1810 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1811 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1766 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1812 break; 1767 break;
1813 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1768 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1814 dev_warn(&adapter->ccw_device->dev, 1769 dev_warn(&adapter->ccw_device->dev,
1815 "No handle is available for LUN " 1770 "No handle is available for LUN "
1816 "0x%016Lx on port 0x%016Lx\n", 1771 "0x%016Lx on port 0x%016Lx\n",
1817 (unsigned long long)unit->fcp_lun, 1772 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1818 (unsigned long long)unit->port->wwpn); 1773 (unsigned long long)zfcp_sdev->port->wwpn);
1819 zfcp_erp_unit_failed(unit, "fsouh_4", req); 1774 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1820 /* fall through */ 1775 /* fall through */
1821 case FSF_INVALID_COMMAND_OPTION: 1776 case FSF_INVALID_COMMAND_OPTION:
1822 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1777 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1824,7 +1779,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1824 case FSF_ADAPTER_STATUS_AVAILABLE: 1779 case FSF_ADAPTER_STATUS_AVAILABLE:
1825 switch (header->fsf_status_qual.word[0]) { 1780 switch (header->fsf_status_qual.word[0]) {
1826 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1781 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1827 zfcp_fc_test_link(unit->port); 1782 zfcp_fc_test_link(zfcp_sdev->port);
1828 /* fall through */ 1783 /* fall through */
1829 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1784 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1830 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1785 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1833,75 +1788,31 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1833 break; 1788 break;
1834 1789
1835 case FSF_GOOD: 1790 case FSF_GOOD:
1836 unit->handle = header->lun_handle; 1791 zfcp_sdev->lun_handle = header->lun_handle;
1837 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1792 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1838 1793 zfcp_cfdc_open_lun_eval(sdev, bottom);
1839 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1840 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1841 !zfcp_ccw_priv_sch(adapter)) {
1842 exclusive = (bottom->lun_access_info &
1843 FSF_UNIT_ACCESS_EXCLUSIVE);
1844 readwrite = (bottom->lun_access_info &
1845 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1846
1847 if (!exclusive)
1848 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1849 &unit->status);
1850
1851 if (!readwrite) {
1852 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1853 &unit->status);
1854 dev_info(&adapter->ccw_device->dev,
1855 "SCSI device at LUN 0x%016Lx on port "
1856 "0x%016Lx opened read-only\n",
1857 (unsigned long long)unit->fcp_lun,
1858 (unsigned long long)unit->port->wwpn);
1859 }
1860
1861 if (exclusive && !readwrite) {
1862 dev_err(&adapter->ccw_device->dev,
1863 "Exclusive read-only access not "
1864 "supported (unit 0x%016Lx, "
1865 "port 0x%016Lx)\n",
1866 (unsigned long long)unit->fcp_lun,
1867 (unsigned long long)unit->port->wwpn);
1868 zfcp_erp_unit_failed(unit, "fsouh_5", req);
1869 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1870 zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
1871 } else if (!exclusive && readwrite) {
1872 dev_err(&adapter->ccw_device->dev,
1873 "Shared read-write access not "
1874 "supported (unit 0x%016Lx, port "
1875 "0x%016Lx)\n",
1876 (unsigned long long)unit->fcp_lun,
1877 (unsigned long long)unit->port->wwpn);
1878 zfcp_erp_unit_failed(unit, "fsouh_7", req);
1879 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1880 zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
1881 }
1882 }
1883 break; 1794 break;
1884 } 1795 }
1885} 1796}
1886 1797
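From here on the handlers carry a struct scsi_device instead of a struct zfcp_unit; the per-LUN driver data hangs off the scsi_device and is reached through sdev_to_zfcp(). A sketch of what such an accessor typically looks like, assuming the data lives in the SCSI transport-class private area:

	static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
	{
		return scsi_transport_device_data(sdev);
	}

The lengthy exclusive/read-only access evaluation that used to follow FSF_GOOD now lives in the zfcp_cfdc_open_lun_eval() helper called above.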
1887/** 1798/**
1888 * zfcp_fsf_open_unit - open unit 1799 * zfcp_fsf_open_lun - open LUN
1889 * @erp_action: pointer to struct zfcp_erp_action 1800 * @erp_action: pointer to struct zfcp_erp_action
1890 * Returns: 0 on success, error otherwise 1801 * Returns: 0 on success, error otherwise
1891 */ 1802 */
1892int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 1803int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1893{ 1804{
1894 struct zfcp_adapter *adapter = erp_action->adapter; 1805 struct zfcp_adapter *adapter = erp_action->adapter;
1895 struct zfcp_qdio *qdio = adapter->qdio; 1806 struct zfcp_qdio *qdio = adapter->qdio;
1896 struct zfcp_fsf_req *req; 1807 struct zfcp_fsf_req *req;
1897 int retval = -EIO; 1808 int retval = -EIO;
1898 1809
1899 spin_lock_bh(&qdio->req_q_lock); 1810 spin_lock_irq(&qdio->req_q_lock);
1900 if (zfcp_qdio_sbal_get(qdio)) 1811 if (zfcp_qdio_sbal_get(qdio))
1901 goto out; 1812 goto out;
1902 1813
1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, 1814 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1904 SBAL_FLAGS0_TYPE_READ, 1815 SBAL_SFLAGS0_TYPE_READ,
1905 adapter->pool.erp_req); 1816 adapter->pool.erp_req);
1906 1817
1907 if (IS_ERR(req)) { 1818 if (IS_ERR(req)) {
@@ -1913,9 +1824,9 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1913 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1824 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1914 1825
1915 req->qtcb->header.port_handle = erp_action->port->handle; 1826 req->qtcb->header.port_handle = erp_action->port->handle;
1916 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; 1827 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1917 req->handler = zfcp_fsf_open_unit_handler; 1828 req->handler = zfcp_fsf_open_lun_handler;
1918 req->data = erp_action->unit; 1829 req->data = erp_action->sdev;
1919 req->erp_action = erp_action; 1830 req->erp_action = erp_action;
1920 erp_action->fsf_req_id = req->req_id; 1831 erp_action->fsf_req_id = req->req_id;
1921 1832
@@ -1929,34 +1840,38 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1929 erp_action->fsf_req_id = 0; 1840 erp_action->fsf_req_id = 0;
1930 } 1841 }
1931out: 1842out:
1932 spin_unlock_bh(&qdio->req_q_lock); 1843 spin_unlock_irq(&qdio->req_q_lock);
1933 return retval; 1844 return retval;
1934} 1845}
1935 1846
1936static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) 1847static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1937{ 1848{
1938 struct zfcp_unit *unit = req->data; 1849 struct scsi_device *sdev = req->data;
1850 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1939 1851
1940 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1852 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1941 return; 1853 return;
1942 1854
1943 switch (req->qtcb->header.fsf_status) { 1855 switch (req->qtcb->header.fsf_status) {
1944 case FSF_PORT_HANDLE_NOT_VALID: 1856 case FSF_PORT_HANDLE_NOT_VALID:
1945 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req); 1857 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1946 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1858 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1947 break; 1859 break;
1948 case FSF_LUN_HANDLE_NOT_VALID: 1860 case FSF_LUN_HANDLE_NOT_VALID:
1949 zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req); 1861 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1950 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1862 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1951 break; 1863 break;
1952 case FSF_PORT_BOXED: 1864 case FSF_PORT_BOXED:
1953 zfcp_erp_port_boxed(unit->port, "fscuh_3", req); 1865 zfcp_erp_set_port_status(zfcp_sdev->port,
1866 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1867 zfcp_erp_port_reopen(zfcp_sdev->port,
1868 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1954 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1869 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1955 break; 1870 break;
1956 case FSF_ADAPTER_STATUS_AVAILABLE: 1871 case FSF_ADAPTER_STATUS_AVAILABLE:
1957 switch (req->qtcb->header.fsf_status_qual.word[0]) { 1872 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1958 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1873 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1959 zfcp_fc_test_link(unit->port); 1874 zfcp_fc_test_link(zfcp_sdev->port);
1960 /* fall through */ 1875 /* fall through */
1961 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1876 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1962 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1877 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1964,28 +1879,29 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1964 } 1879 }
1965 break; 1880 break;
1966 case FSF_GOOD: 1881 case FSF_GOOD:
1967 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1882 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1968 break; 1883 break;
1969 } 1884 }
1970} 1885}
1971 1886
1972/** 1887/**
1973 * zfcp_fsf_close_unit - close zfcp unit 1888 * zfcp_fsf_close_LUN - close LUN
1974 * @erp_action: pointer to struct zfcp_unit 1889 * @erp_action: pointer to erp_action triggering the "close LUN"
1975 * Returns: 0 on success, error otherwise 1890 * Returns: 0 on success, error otherwise
1976 */ 1891 */
1977int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 1892int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1978{ 1893{
1979 struct zfcp_qdio *qdio = erp_action->adapter->qdio; 1894 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1895 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1980 struct zfcp_fsf_req *req; 1896 struct zfcp_fsf_req *req;
1981 int retval = -EIO; 1897 int retval = -EIO;
1982 1898
1983 spin_lock_bh(&qdio->req_q_lock); 1899 spin_lock_irq(&qdio->req_q_lock);
1984 if (zfcp_qdio_sbal_get(qdio)) 1900 if (zfcp_qdio_sbal_get(qdio))
1985 goto out; 1901 goto out;
1986 1902
1987 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, 1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1988 SBAL_FLAGS0_TYPE_READ, 1904 SBAL_SFLAGS0_TYPE_READ,
1989 qdio->adapter->pool.erp_req); 1905 qdio->adapter->pool.erp_req);
1990 1906
1991 if (IS_ERR(req)) { 1907 if (IS_ERR(req)) {
@@ -1997,9 +1913,9 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1997 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 1913 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1998 1914
1999 req->qtcb->header.port_handle = erp_action->port->handle; 1915 req->qtcb->header.port_handle = erp_action->port->handle;
2000 req->qtcb->header.lun_handle = erp_action->unit->handle; 1916 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2001 req->handler = zfcp_fsf_close_unit_handler; 1917 req->handler = zfcp_fsf_close_lun_handler;
2002 req->data = erp_action->unit; 1918 req->data = erp_action->sdev;
2003 req->erp_action = erp_action; 1919 req->erp_action = erp_action;
2004 erp_action->fsf_req_id = req->req_id; 1920 erp_action->fsf_req_id = req->req_id;
2005 1921
@@ -2010,7 +1926,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2010 erp_action->fsf_req_id = 0; 1926 erp_action->fsf_req_id = 0;
2011 } 1927 }
2012out: 1928out:
2013 spin_unlock_bh(&qdio->req_q_lock); 1929 spin_unlock_irq(&qdio->req_q_lock);
2014 return retval; 1930 return retval;
2015} 1931}
2016 1932
@@ -2025,7 +1941,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2025{ 1941{
2026 struct fsf_qual_latency_info *lat_in; 1942 struct fsf_qual_latency_info *lat_in;
2027 struct latency_cont *lat = NULL; 1943 struct latency_cont *lat = NULL;
2028 struct zfcp_unit *unit = req->unit; 1944 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
2029 struct zfcp_blk_drv_data blktrc; 1945 struct zfcp_blk_drv_data blktrc;
2030 int ticks = req->adapter->timer_ticks; 1946 int ticks = req->adapter->timer_ticks;
2031 1947
@@ -2048,24 +1964,24 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2048 case FSF_DATADIR_DIF_READ_STRIP: 1964 case FSF_DATADIR_DIF_READ_STRIP:
2049 case FSF_DATADIR_DIF_READ_CONVERT: 1965 case FSF_DATADIR_DIF_READ_CONVERT:
2050 case FSF_DATADIR_READ: 1966 case FSF_DATADIR_READ:
2051 lat = &unit->latencies.read; 1967 lat = &zfcp_sdev->latencies.read;
2052 break; 1968 break;
2053 case FSF_DATADIR_DIF_WRITE_INSERT: 1969 case FSF_DATADIR_DIF_WRITE_INSERT:
2054 case FSF_DATADIR_DIF_WRITE_CONVERT: 1970 case FSF_DATADIR_DIF_WRITE_CONVERT:
2055 case FSF_DATADIR_WRITE: 1971 case FSF_DATADIR_WRITE:
2056 lat = &unit->latencies.write; 1972 lat = &zfcp_sdev->latencies.write;
2057 break; 1973 break;
2058 case FSF_DATADIR_CMND: 1974 case FSF_DATADIR_CMND:
2059 lat = &unit->latencies.cmd; 1975 lat = &zfcp_sdev->latencies.cmd;
2060 break; 1976 break;
2061 } 1977 }
2062 1978
2063 if (lat) { 1979 if (lat) {
2064 spin_lock(&unit->latencies.lock); 1980 spin_lock(&zfcp_sdev->latencies.lock);
2065 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); 1981 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2066 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); 1982 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2067 lat->counter++; 1983 lat->counter++;
2068 spin_unlock(&unit->latencies.lock); 1984 spin_unlock(&zfcp_sdev->latencies.lock);
2069 } 1985 }
2070 } 1986 }
2071 1987
@@ -2073,7 +1989,79 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2073 sizeof(blktrc)); 1989 sizeof(blktrc));
2074} 1990}
2075 1991
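The latency counters move from the unit to the zfcp_scsi_dev, but the update protocol under latencies.lock is unchanged: accumulate a sum and track min/max per channel. A sketch of what each zfcp_fsf_update_lat() call above is assumed to do:

	static void update_lat(struct fsf_latency_record *rec, u32 lat)
	{
		rec->sum += lat;	/* for averaging in sysfs later */
		if (rec->min > lat)
			rec->min = lat;
		if (rec->max < lat)
			rec->max = lat;
	}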
2076static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) 1992static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
1993{
1994 struct scsi_cmnd *scmnd = req->data;
1995 struct scsi_device *sdev = scmnd->device;
1996 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1997 struct fsf_qtcb_header *header = &req->qtcb->header;
1998
1999 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2000 return;
2001
2002 switch (header->fsf_status) {
2003 case FSF_HANDLE_MISMATCH:
2004 case FSF_PORT_HANDLE_NOT_VALID:
2005 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2006 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2007 break;
2008 case FSF_FCPLUN_NOT_VALID:
2009 case FSF_LUN_HANDLE_NOT_VALID:
2010 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2011 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2012 break;
2013 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2014 zfcp_fsf_class_not_supp(req);
2015 break;
2016 case FSF_ACCESS_DENIED:
2017 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
2018 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2019 break;
2020 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2021 dev_err(&req->adapter->ccw_device->dev,
2022 "Incorrect direction %d, LUN 0x%016Lx on port "
2023 "0x%016Lx closed\n",
2024 req->qtcb->bottom.io.data_direction,
2025 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2026 (unsigned long long)zfcp_sdev->port->wwpn);
2027 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2028 "fssfch3");
2029 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2030 break;
2031 case FSF_CMND_LENGTH_NOT_VALID:
2032 dev_err(&req->adapter->ccw_device->dev,
2033 "Incorrect CDB length %d, LUN 0x%016Lx on "
2034 "port 0x%016Lx closed\n",
2035 req->qtcb->bottom.io.fcp_cmnd_length,
2036 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2037 (unsigned long long)zfcp_sdev->port->wwpn);
2038 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2039 "fssfch4");
2040 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2041 break;
2042 case FSF_PORT_BOXED:
2043 zfcp_erp_set_port_status(zfcp_sdev->port,
2044 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2045 zfcp_erp_port_reopen(zfcp_sdev->port,
2046 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2047 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2048 break;
2049 case FSF_LUN_BOXED:
2050 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2051 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2052 "fssfch6");
2053 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2054 break;
2055 case FSF_ADAPTER_STATUS_AVAILABLE:
2056 if (header->fsf_status_qual.word[0] ==
2057 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2058 zfcp_fc_test_link(zfcp_sdev->port);
2059 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2060 break;
2061 }
2062}
2063
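zfcp_fsf_fcp_handler_common() absorbs the fsf_status decoding that the removed zfcp_fsf_send_fcp_command_handler() (further below) performed for both SCSI commands and task management functions. Both completion paths now call it first, roughly:

	static void fcp_cmnd_handler(struct zfcp_fsf_req *req)      /* sketch */
	{
		zfcp_fsf_fcp_handler_common(req);  /* shared QTCB status decode */
		/* ... SCSI-command specific completion, scsi_done() ... */
	}

	static void fcp_task_mgmt_handler(struct zfcp_fsf_req *req) /* sketch */
	{
		zfcp_fsf_fcp_handler_common(req);
		/* ... evaluate the FCP_RSP rsp_code for the TMF result ... */
	}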
2064static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2077{ 2065{
2078 struct scsi_cmnd *scpnt; 2066 struct scsi_cmnd *scpnt;
2079 struct fcp_resp_with_ext *fcp_rsp; 2067 struct fcp_resp_with_ext *fcp_rsp;
@@ -2087,6 +2075,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2087 return; 2075 return;
2088 } 2076 }
2089 2077
2078 zfcp_fsf_fcp_handler_common(req);
2079
2090 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2080 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2091 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); 2081 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2092 goto skip_fsfstatus; 2082 goto skip_fsfstatus;
@@ -2112,7 +2102,7 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2112 2102
2113skip_fsfstatus: 2103skip_fsfstatus:
2114 zfcp_fsf_req_trace(req, scpnt); 2104 zfcp_fsf_req_trace(req, scpnt);
2115 zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); 2105 zfcp_dbf_scsi_result(scpnt, req);
2116 2106
2117 scpnt->host_scribble = NULL; 2107 scpnt->host_scribble = NULL;
2118 (scpnt->scsi_done) (scpnt); 2108 (scpnt->scsi_done) (scpnt);
@@ -2125,97 +2115,6 @@ skip_fsfstatus:
2125 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2115 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2126} 2116}
2127 2117
2128static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2129{
2130 struct fcp_resp_with_ext *fcp_rsp;
2131 struct fcp_resp_rsp_info *rsp_info;
2132
2133 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2134 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2135
2136 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2137 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2138 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2139}
2140
2141
2142static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2143{
2144 struct zfcp_unit *unit;
2145 struct fsf_qtcb_header *header = &req->qtcb->header;
2146
2147 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2148 unit = req->data;
2149 else
2150 unit = req->unit;
2151
2152 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2153 goto skip_fsfstatus;
2154
2155 switch (header->fsf_status) {
2156 case FSF_HANDLE_MISMATCH:
2157 case FSF_PORT_HANDLE_NOT_VALID:
2158 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
2159 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2160 break;
2161 case FSF_FCPLUN_NOT_VALID:
2162 case FSF_LUN_HANDLE_NOT_VALID:
2163 zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
2164 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2165 break;
2166 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2167 zfcp_fsf_class_not_supp(req);
2168 break;
2169 case FSF_ACCESS_DENIED:
2170 zfcp_fsf_access_denied_unit(req, unit);
2171 break;
2172 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2173 dev_err(&req->adapter->ccw_device->dev,
2174 "Incorrect direction %d, unit 0x%016Lx on port "
2175 "0x%016Lx closed\n",
2176 req->qtcb->bottom.io.data_direction,
2177 (unsigned long long)unit->fcp_lun,
2178 (unsigned long long)unit->port->wwpn);
2179 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
2180 req);
2181 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2182 break;
2183 case FSF_CMND_LENGTH_NOT_VALID:
2184 dev_err(&req->adapter->ccw_device->dev,
2185 "Incorrect CDB length %d, unit 0x%016Lx on "
2186 "port 0x%016Lx closed\n",
2187 req->qtcb->bottom.io.fcp_cmnd_length,
2188 (unsigned long long)unit->fcp_lun,
2189 (unsigned long long)unit->port->wwpn);
2190 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
2191 req);
2192 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2193 break;
2194 case FSF_PORT_BOXED:
2195 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2196 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2197 break;
2198 case FSF_LUN_BOXED:
2199 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2200 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2201 break;
2202 case FSF_ADAPTER_STATUS_AVAILABLE:
2203 if (header->fsf_status_qual.word[0] ==
2204 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2205 zfcp_fc_test_link(unit->port);
2206 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2207 break;
2208 }
2209skip_fsfstatus:
2210 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2211 zfcp_fsf_send_fcp_ctm_handler(req);
2212 else {
2213 zfcp_fsf_send_fcp_command_task_handler(req);
2214 req->unit = NULL;
2215 put_device(&unit->dev);
2216 }
2217}
2218
2219static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) 2118static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2220{ 2119{
2221 switch (scsi_get_prot_op(scsi_cmnd)) { 2120 switch (scsi_get_prot_op(scsi_cmnd)) {
@@ -2255,33 +2154,34 @@ static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2255} 2154}
2256 2155
2257/** 2156/**
2258 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2157 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2259 * @unit: unit where command is sent to
2260 * @scsi_cmnd: scsi command to be sent 2158 * @scsi_cmnd: scsi command to be sent
2261 */ 2159 */
2262int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, 2160int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2263 struct scsi_cmnd *scsi_cmnd)
2264{ 2161{
2265 struct zfcp_fsf_req *req; 2162 struct zfcp_fsf_req *req;
2266 struct fcp_cmnd *fcp_cmnd; 2163 struct fcp_cmnd *fcp_cmnd;
2267 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2164 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2268 int real_bytes, retval = -EIO, dix_bytes = 0; 2165 int real_bytes, retval = -EIO, dix_bytes = 0;
2269 struct zfcp_adapter *adapter = unit->port->adapter; 2166 struct scsi_device *sdev = scsi_cmnd->device;
2167 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2168 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2270 struct zfcp_qdio *qdio = adapter->qdio; 2169 struct zfcp_qdio *qdio = adapter->qdio;
2271 struct fsf_qtcb_bottom_io *io; 2170 struct fsf_qtcb_bottom_io *io;
2171 unsigned long flags;
2272 2172
2273 if (unlikely(!(atomic_read(&unit->status) & 2173 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2274 ZFCP_STATUS_COMMON_UNBLOCKED))) 2174 ZFCP_STATUS_COMMON_UNBLOCKED)))
2275 return -EBUSY; 2175 return -EBUSY;
2276 2176
2277 spin_lock(&qdio->req_q_lock); 2177 spin_lock_irqsave(&qdio->req_q_lock, flags);
2278 if (atomic_read(&qdio->req_q_free) <= 0) { 2178 if (atomic_read(&qdio->req_q_free) <= 0) {
2279 atomic_inc(&qdio->req_q_full); 2179 atomic_inc(&qdio->req_q_full);
2280 goto out; 2180 goto out;
2281 } 2181 }
2282 2182
2283 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) 2183 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2284 sbtype = SBAL_FLAGS0_TYPE_WRITE; 2184 sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2285 2185
2286 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2186 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2287 sbtype, adapter->pool.scsi_req); 2187 sbtype, adapter->pool.scsi_req);
@@ -2295,11 +2195,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2295 2195
2296 io = &req->qtcb->bottom.io; 2196 io = &req->qtcb->bottom.io;
2297 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2197 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2298 req->unit = unit;
2299 req->data = scsi_cmnd; 2198 req->data = scsi_cmnd;
2300 req->handler = zfcp_fsf_send_fcp_command_handler; 2199 req->handler = zfcp_fsf_fcp_cmnd_handler;
2301 req->qtcb->header.lun_handle = unit->handle; 2200 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2302 req->qtcb->header.port_handle = unit->port->handle; 2201 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2303 io->service_class = FSF_CLASS_3; 2202 io->service_class = FSF_CLASS_3;
2304 io->fcp_cmnd_length = FCP_CMND_LEN; 2203 io->fcp_cmnd_length = FCP_CMND_LEN;
2305 2204
@@ -2310,10 +2209,8 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2310 2209
2311 zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); 2210 zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
2312 2211
2313 get_device(&unit->dev);
2314
2315 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2212 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2316 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); 2213 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
2317 2214
2318 if (scsi_prot_sg_count(scsi_cmnd)) { 2215 if (scsi_prot_sg_count(scsi_cmnd)) {
2319 zfcp_qdio_set_data_div(qdio, &req->qdio_req, 2216 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
@@ -2338,36 +2235,52 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2338 goto out; 2235 goto out;
2339 2236
2340failed_scsi_cmnd: 2237failed_scsi_cmnd:
2341 put_device(&unit->dev);
2342 zfcp_fsf_req_free(req); 2238 zfcp_fsf_req_free(req);
2343 scsi_cmnd->host_scribble = NULL; 2239 scsi_cmnd->host_scribble = NULL;
2344out: 2240out:
2345 spin_unlock(&qdio->req_q_lock); 2241 spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2346 return retval; 2242 return retval;
2347} 2243}
2348 2244
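With the extra tm_flags argument, one helper now builds the FCP_CMND IU for both regular I/O and task management, replacing the separate zfcp_fc_fcp_tm() used further below. Sketch of the two call sites after this patch (FCP_TMF_LUN_RESET is an example flag from <scsi/fc/fc_fcp.h>):

	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);             /* normal I/O */
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, FCP_TMF_LUN_RESET); /* TMF example */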
2245static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2246{
2247 struct fcp_resp_with_ext *fcp_rsp;
2248 struct fcp_resp_rsp_info *rsp_info;
2249
2250 zfcp_fsf_fcp_handler_common(req);
2251
2252 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2253 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2254
2255 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2256 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2257 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2258}
2259
2349/** 2260/**
2350 * zfcp_fsf_send_fcp_ctm - send SCSI task management command 2261 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2351 * @unit: pointer to struct zfcp_unit 2262 * @scmnd: SCSI command to send the task management command for
2352 * @tm_flags: unsigned byte for task management flags 2263 * @tm_flags: unsigned byte for task management flags
2353 * Returns: on success pointer to struct fsf_req, NULL otherwise 2264 * Returns: on success pointer to struct fsf_req, NULL otherwise
2354 */ 2265 */
2355struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) 2266struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2267 u8 tm_flags)
2356{ 2268{
2357 struct zfcp_fsf_req *req = NULL; 2269 struct zfcp_fsf_req *req = NULL;
2358 struct fcp_cmnd *fcp_cmnd; 2270 struct fcp_cmnd *fcp_cmnd;
2359 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 2271 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2272 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2360 2273
2361 if (unlikely(!(atomic_read(&unit->status) & 2274 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2362 ZFCP_STATUS_COMMON_UNBLOCKED))) 2275 ZFCP_STATUS_COMMON_UNBLOCKED)))
2363 return NULL; 2276 return NULL;
2364 2277
2365 spin_lock_bh(&qdio->req_q_lock); 2278 spin_lock_irq(&qdio->req_q_lock);
2366 if (zfcp_qdio_sbal_get(qdio)) 2279 if (zfcp_qdio_sbal_get(qdio))
2367 goto out; 2280 goto out;
2368 2281
2369 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2282 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2370 SBAL_FLAGS0_TYPE_WRITE, 2283 SBAL_SFLAGS0_TYPE_WRITE,
2371 qdio->adapter->pool.scsi_req); 2284 qdio->adapter->pool.scsi_req);
2372 2285
2373 if (IS_ERR(req)) { 2286 if (IS_ERR(req)) {
@@ -2375,11 +2288,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2375 goto out; 2288 goto out;
2376 } 2289 }
2377 2290
2378 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; 2291 req->data = scmnd;
2379 req->data = unit; 2292 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2380 req->handler = zfcp_fsf_send_fcp_command_handler; 2293 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2381 req->qtcb->header.lun_handle = unit->handle; 2294 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2382 req->qtcb->header.port_handle = unit->port->handle;
2383 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2295 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2384 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2296 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2385 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; 2297 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
@@ -2387,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2387 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2299 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2388 2300
2389 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2301 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2390 zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags); 2302 zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
2391 2303
2392 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 2304 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2393 if (!zfcp_fsf_req_send(req)) 2305 if (!zfcp_fsf_req_send(req))
@@ -2396,7 +2308,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2396 zfcp_fsf_req_free(req); 2308 zfcp_fsf_req_free(req);
2397 req = NULL; 2309 req = NULL;
2398out: 2310out:
2399 spin_unlock_bh(&qdio->req_q_lock); 2311 spin_unlock_irq(&qdio->req_q_lock);
2400 return req; 2312 return req;
2401} 2313}
2402 2314
@@ -2416,23 +2328,24 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2416 struct zfcp_qdio *qdio = adapter->qdio; 2328 struct zfcp_qdio *qdio = adapter->qdio;
2417 struct zfcp_fsf_req *req = NULL; 2329 struct zfcp_fsf_req *req = NULL;
2418 struct fsf_qtcb_bottom_support *bottom; 2330 struct fsf_qtcb_bottom_support *bottom;
2419 int direction, retval = -EIO, bytes; 2331 int retval = -EIO, bytes;
2332 u8 direction;
2420 2333
2421 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) 2334 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2422 return ERR_PTR(-EOPNOTSUPP); 2335 return ERR_PTR(-EOPNOTSUPP);
2423 2336
2424 switch (fsf_cfdc->command) { 2337 switch (fsf_cfdc->command) {
2425 case FSF_QTCB_DOWNLOAD_CONTROL_FILE: 2338 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2426 direction = SBAL_FLAGS0_TYPE_WRITE; 2339 direction = SBAL_SFLAGS0_TYPE_WRITE;
2427 break; 2340 break;
2428 case FSF_QTCB_UPLOAD_CONTROL_FILE: 2341 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2429 direction = SBAL_FLAGS0_TYPE_READ; 2342 direction = SBAL_SFLAGS0_TYPE_READ;
2430 break; 2343 break;
2431 default: 2344 default:
2432 return ERR_PTR(-EINVAL); 2345 return ERR_PTR(-EINVAL);
2433 } 2346 }
2434 2347
2435 spin_lock_bh(&qdio->req_q_lock); 2348 spin_lock_irq(&qdio->req_q_lock);
2436 if (zfcp_qdio_sbal_get(qdio)) 2349 if (zfcp_qdio_sbal_get(qdio))
2437 goto out; 2350 goto out;
2438 2351
@@ -2459,7 +2372,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2459 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2372 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2460 retval = zfcp_fsf_req_send(req); 2373 retval = zfcp_fsf_req_send(req);
2461out: 2374out:
2462 spin_unlock_bh(&qdio->req_q_lock); 2375 spin_unlock_irq(&qdio->req_q_lock);
2463 2376
2464 if (!retval) { 2377 if (!retval) {
2465 wait_for_completion(&req->completion); 2378 wait_for_completion(&req->completion);
@@ -2501,7 +2414,16 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2501 fsf_req->qdio_req.sbal_response = sbal_idx; 2414 fsf_req->qdio_req.sbal_response = sbal_idx;
2502 zfcp_fsf_req_complete(fsf_req); 2415 zfcp_fsf_req_complete(fsf_req);
2503 2416
2504 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) 2417 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2505 break; 2418 break;
2506 } 2419 }
2507} 2420}
2421
2422struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
2423 struct qdio_buffer *sbal)
2424{
2425 struct qdio_buffer_element *sbale = &sbal->element[0];
2426 u64 req_id = (unsigned long) sbale->addr;
2427
2428 return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
2429}
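The new zfcp_fsf_get_req() is the read side of a convention set up in zfcp_qdio_req_init() (see the zfcp_qdio.h hunk below): the request ID is stashed in element[0].addr of the outgoing SBAL, so any SBAL handed back by the hardware can be mapped to its zfcp_fsf_req. Sketch of the pairing:

	/* send side (zfcp_qdio.h): */
	sbale->addr = (void *) req_id;

	/* receive side (above): */
	u64 req_id = (unsigned long) sbal->element[0].addr;
	req = zfcp_reqlist_find(qdio->adapter->req_list, req_id);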
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b2635759721c..d9c40ea73eef 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -41,7 +41,7 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
41 zfcp_qdio_siosl(adapter); 41 zfcp_qdio_siosl(adapter);
42 zfcp_erp_adapter_reopen(adapter, 42 zfcp_erp_adapter_reopen(adapter,
43 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 43 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
44 ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); 44 ZFCP_STATUS_COMMON_ERP_FAILED, id);
45} 45}
46 46
47static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) 47static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -60,13 +60,11 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
60 unsigned long long now, span; 60 unsigned long long now, span;
61 int used; 61 int used;
62 62
63 spin_lock(&qdio->stat_lock);
64 now = get_clock_monotonic(); 63 now = get_clock_monotonic();
65 span = (now - qdio->req_q_time) >> 12; 64 span = (now - qdio->req_q_time) >> 12;
66 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); 65 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
67 qdio->req_q_util += used * span; 66 qdio->req_q_util += used * span;
68 qdio->req_q_time = now; 67 qdio->req_q_time = now;
69 spin_unlock(&qdio->stat_lock);
70} 68}
71 69
72static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, 70static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
@@ -76,7 +74,6 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
76 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; 74 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
77 75
78 if (unlikely(qdio_err)) { 76 if (unlikely(qdio_err)) {
79 zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
80 zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err); 77 zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
81 return; 78 return;
82 } 79 }
@@ -84,7 +81,9 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
84 /* cleanup all SBALs being program-owned now */ 81 /* cleanup all SBALs being program-owned now */
85 zfcp_qdio_zero_sbals(qdio->req_q, idx, count); 82 zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
86 83
84 spin_lock_irq(&qdio->stat_lock);
87 zfcp_qdio_account(qdio); 85 zfcp_qdio_account(qdio);
86 spin_unlock_irq(&qdio->stat_lock);
88 atomic_add(count, &qdio->req_q_free); 87 atomic_add(count, &qdio->req_q_free);
89 wake_up(&qdio->req_q_wq); 88 wake_up(&qdio->req_q_wq);
90} 89}
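zfcp_qdio_account() no longer takes stat_lock itself; each caller brackets the call with the spinlock variant matching its context: the request-queue interrupt handler uses the _irq variant, while zfcp_qdio_send() (below) can use plain spin_lock() because it already runs under req_q_lock with interrupts disabled. Sketch:

	spin_lock_irq(&qdio->stat_lock);     /* interrupt-handler path */
	zfcp_qdio_account(qdio);
	spin_unlock_irq(&qdio->stat_lock);

	spin_lock(&qdio->stat_lock);         /* zfcp_qdio_send, IRQs already off */
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);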
@@ -97,7 +96,6 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
97 int sbal_idx, sbal_no; 96 int sbal_idx, sbal_no;
98 97
99 if (unlikely(qdio_err)) { 98 if (unlikely(qdio_err)) {
100 zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
101 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); 99 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
102 return; 100 return;
103 } 101 }
@@ -116,7 +114,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
116 * put SBALs back to response queue 114 * put SBALs back to response queue
117 */ 115 */
118 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count)) 116 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
119 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL); 117 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
120} 118}
121 119
122static struct qdio_buffer_element * 120static struct qdio_buffer_element *
@@ -126,7 +124,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
126 124
127 /* set last entry flag in current SBALE of current SBAL */ 125 /* set last entry flag in current SBALE of current SBAL */
128 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 126 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
129 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 127 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
130 128
131 /* don't exceed last allowed SBAL */ 129 /* don't exceed last allowed SBAL */
132 if (q_req->sbal_last == q_req->sbal_limit) 130 if (q_req->sbal_last == q_req->sbal_limit)
@@ -134,7 +132,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
134 132
135 /* set chaining flag in first SBALE of current SBAL */ 133 /* set chaining flag in first SBALE of current SBAL */
136 sbale = zfcp_qdio_sbale_req(qdio, q_req); 134 sbale = zfcp_qdio_sbale_req(qdio, q_req);
137 sbale->flags |= SBAL_FLAGS0_MORE_SBALS; 135 sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
138 136
139 /* calculate index of next SBAL */ 137 /* calculate index of next SBAL */
140 q_req->sbal_last++; 138 q_req->sbal_last++;
@@ -149,7 +147,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
149 147
150 /* set storage-block type for new SBAL */ 148 /* set storage-block type for new SBAL */
151 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 149 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
152 sbale->flags |= q_req->sbtype; 150 sbale->sflags |= q_req->sbtype;
153 151
154 return sbale; 152 return sbale;
155} 153}
@@ -179,7 +177,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
179 177
180 /* set storage-block type for this request */ 178 /* set storage-block type for this request */
181 sbale = zfcp_qdio_sbale_req(qdio, q_req); 179 sbale = zfcp_qdio_sbale_req(qdio, q_req);
182 sbale->flags |= q_req->sbtype; 180 sbale->sflags |= q_req->sbtype;
183 181
184 for (; sg; sg = sg_next(sg)) { 182 for (; sg; sg = sg_next(sg)) {
185 sbale = zfcp_qdio_sbale_next(qdio, q_req); 183 sbale = zfcp_qdio_sbale_next(qdio, q_req);
@@ -201,11 +199,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
201 199
202static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 200static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
203{ 201{
204 spin_lock_bh(&qdio->req_q_lock); 202 spin_lock_irq(&qdio->req_q_lock);
205 if (atomic_read(&qdio->req_q_free) || 203 if (atomic_read(&qdio->req_q_free) ||
206 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 204 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
207 return 1; 205 return 1;
208 spin_unlock_bh(&qdio->req_q_lock); 206 spin_unlock_irq(&qdio->req_q_lock);
209 return 0; 207 return 0;
210} 208}
211 209
@@ -223,7 +221,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
223{ 221{
224 long ret; 222 long ret;
225 223
226 spin_unlock_bh(&qdio->req_q_lock); 224 spin_unlock_irq(&qdio->req_q_lock);
227 ret = wait_event_interruptible_timeout(qdio->req_q_wq, 225 ret = wait_event_interruptible_timeout(qdio->req_q_wq,
228 zfcp_qdio_sbal_check(qdio), 5 * HZ); 226 zfcp_qdio_sbal_check(qdio), 5 * HZ);
229 227
@@ -236,10 +234,10 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
236 if (!ret) { 234 if (!ret) {
237 atomic_inc(&qdio->req_q_full); 235 atomic_inc(&qdio->req_q_full);
238 /* assume hanging outbound queue, try queue recovery */ 236 /* assume hanging outbound queue, try queue recovery */
239 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL); 237 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
240 } 238 }
241 239
242 spin_lock_bh(&qdio->req_q_lock); 240 spin_lock_irq(&qdio->req_q_lock);
243 return -EIO; 241 return -EIO;
244} 242}
245 243
@@ -254,7 +252,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
254 int retval; 252 int retval;
255 u8 sbal_number = q_req->sbal_number; 253 u8 sbal_number = q_req->sbal_number;
256 254
255 spin_lock(&qdio->stat_lock);
257 zfcp_qdio_account(qdio); 256 zfcp_qdio_account(qdio);
257 spin_unlock(&qdio->stat_lock);
258 258
259 retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, 259 retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
260 q_req->sbal_first, sbal_number); 260 q_req->sbal_first, sbal_number);
@@ -277,16 +277,12 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
277static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, 277static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
278 struct zfcp_qdio *qdio) 278 struct zfcp_qdio *qdio)
279{ 279{
280 280 memset(id, 0, sizeof(*id));
281 id->cdev = qdio->adapter->ccw_device; 281 id->cdev = qdio->adapter->ccw_device;
282 id->q_format = QDIO_ZFCP_QFMT; 282 id->q_format = QDIO_ZFCP_QFMT;
283 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); 283 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
284 ASCEBC(id->adapter_name, 8); 284 ASCEBC(id->adapter_name, 8);
285 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; 285 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
286 id->qib_param_field_format = 0;
287 id->qib_param_field = NULL;
288 id->input_slib_elements = NULL;
289 id->output_slib_elements = NULL;
290 id->no_input_qs = 1; 286 id->no_input_qs = 1;
291 id->no_output_qs = 1; 287 id->no_output_qs = 1;
292 id->input_handler = zfcp_qdio_int_resp; 288 id->input_handler = zfcp_qdio_int_resp;
@@ -294,6 +290,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
294 id->int_parm = (unsigned long) qdio; 290 id->int_parm = (unsigned long) qdio;
295 id->input_sbal_addr_array = (void **) (qdio->res_q); 291 id->input_sbal_addr_array = (void **) (qdio->res_q);
296 id->output_sbal_addr_array = (void **) (qdio->req_q); 292 id->output_sbal_addr_array = (void **) (qdio->req_q);
293 id->scan_threshold =
294 QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
297} 295}
298 296
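Zeroing the qdio_initialize structure once replaces the field-by-field NULL assignments and keeps any members added later initialized by default. The new scan_threshold leaves headroom of two maximum-size requests' worth of SBALs, presumably so the qdio layer starts scanning the output queue before it runs full; the exact threshold semantics belong to the qdio layer, not this driver. Sketch:

	memset(id, 0, sizeof(*id));   /* all unset fields default to 0/NULL */
	/* ... assign only the fields that differ from zero ... */
	id->scan_threshold =
		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;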
299/** 297/**
@@ -311,6 +309,7 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
311 return -ENOMEM; 309 return -ENOMEM;
312 310
313 zfcp_qdio_setup_init_data(&init_data, qdio); 311 zfcp_qdio_setup_init_data(&init_data, qdio);
312 init_waitqueue_head(&qdio->req_q_wq);
314 313
315 return qdio_allocate(&init_data); 314 return qdio_allocate(&init_data);
316} 315}
@@ -328,9 +327,9 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
328 return; 327 return;
329 328
330 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 329 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
331 spin_lock_bh(&qdio->req_q_lock); 330 spin_lock_irq(&qdio->req_q_lock);
332 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 331 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
333 spin_unlock_bh(&qdio->req_q_lock); 332 spin_unlock_irq(&qdio->req_q_lock);
334 333
335 wake_up(&qdio->req_q_wq); 334 wake_up(&qdio->req_q_wq);
336 335
@@ -385,16 +384,18 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
385 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 384 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
386 sbale = &(qdio->res_q[cc]->element[0]); 385 sbale = &(qdio->res_q[cc]->element[0]);
387 sbale->length = 0; 386 sbale->length = 0;
388 sbale->flags = SBAL_FLAGS_LAST_ENTRY; 387 sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
388 sbale->sflags = 0;
389 sbale->addr = NULL; 389 sbale->addr = NULL;
390 } 390 }
391 391
392 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) 392 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
393 goto failed_qdio; 393 goto failed_qdio;
394 394
395 /* set index of first avalable SBALS / number of available SBALS */ 395 /* set index of first available SBALS / number of available SBALS */
396 qdio->req_q_idx = 0; 396 qdio->req_q_idx = 0;
397 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); 397 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
398 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
398 399
399 return 0; 400 return 0;
400 401
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 2297d8d3e947..54e22ace012b 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -67,7 +67,7 @@ struct zfcp_qdio {
67 * @qdio_outb_usage: usage of outbound queue 67 * @qdio_outb_usage: usage of outbound queue
68 */ 68 */
69struct zfcp_qdio_req { 69struct zfcp_qdio_req {
70 u32 sbtype; 70 u8 sbtype;
71 u8 sbal_number; 71 u8 sbal_number;
72 u8 sbal_first; 72 u8 sbal_first;
73 u8 sbal_last; 73 u8 sbal_last;
@@ -116,7 +116,7 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
116 */ 116 */
117static inline 117static inline
118void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, 118void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
119 unsigned long req_id, u32 sbtype, void *data, u32 len) 119 unsigned long req_id, u8 sbtype, void *data, u32 len)
120{ 120{
121 struct qdio_buffer_element *sbale; 121 struct qdio_buffer_element *sbale;
122 int count = min(atomic_read(&qdio->req_q_free), 122 int count = min(atomic_read(&qdio->req_q_free),
@@ -131,7 +131,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
131 131
132 sbale = zfcp_qdio_sbale_req(qdio, q_req); 132 sbale = zfcp_qdio_sbale_req(qdio, q_req);
133 sbale->addr = (void *) req_id; 133 sbale->addr = (void *) req_id;
134 sbale->flags = SBAL_FLAGS0_COMMAND | sbtype; 134 sbale->eflags = 0;
135 sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
135 136
136 if (unlikely(!data)) 137 if (unlikely(!data))
137 return; 138 return;
@@ -173,7 +174,7 @@ void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
173 struct qdio_buffer_element *sbale; 174 struct qdio_buffer_element *sbale;
174 175
175 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 176 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
176 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 177 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
177} 178}
178 179
179/** 180/**
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index cb000c9833bb..2a4991d6d4d5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -30,6 +30,10 @@ module_param_named(dif, enable_dif, bool, 0600);
30MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); 30MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
31#endif 31#endif
32 32
33static bool allow_lun_scan = 1;
34module_param(allow_lun_scan, bool, 0600);
35MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
36
33static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, 37static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
34 int reason) 38 int reason)
35{ 39{
@@ -49,11 +53,12 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
49 return sdev->queue_depth; 53 return sdev->queue_depth;
50} 54}
51 55
52static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 56static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
53{ 57{
54 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 58 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
55 unit->device = NULL; 59
56 put_device(&unit->dev); 60 zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
61 put_device(&zfcp_sdev->port->dev);
57} 62}
58 63
59static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 64static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -67,47 +72,35 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
67 72
68static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) 73static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
69{ 74{
70 struct zfcp_adapter *adapter =
71 (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
72
73 set_host_byte(scpnt, result); 75 set_host_byte(scpnt, result);
74 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); 76 zfcp_dbf_scsi_fail_send(scpnt);
75 scpnt->scsi_done(scpnt); 77 scpnt->scsi_done(scpnt);
76} 78}
77 79
78static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, 80static
79 void (*done) (struct scsi_cmnd *)) 81int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
80{ 82{
81 struct zfcp_unit *unit; 83 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
82 struct zfcp_adapter *adapter;
83 int status, scsi_result, ret;
84 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); 84 struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
85 int status, scsi_result, ret;
85 86
86 /* reset the status for this request */ 87 /* reset the status for this request */
87 scpnt->result = 0; 88 scpnt->result = 0;
88 scpnt->host_scribble = NULL; 89 scpnt->host_scribble = NULL;
89 scpnt->scsi_done = done;
90
91 /*
92 * figure out adapter and target device
93 * (stored there by zfcp_scsi_slave_alloc)
94 */
95 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
96 unit = scpnt->device->hostdata;
97 90
98 scsi_result = fc_remote_port_chkready(rport); 91 scsi_result = fc_remote_port_chkready(rport);
99 if (unlikely(scsi_result)) { 92 if (unlikely(scsi_result)) {
100 scpnt->result = scsi_result; 93 scpnt->result = scsi_result;
101 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); 94 zfcp_dbf_scsi_fail_send(scpnt);
102 scpnt->scsi_done(scpnt); 95 scpnt->scsi_done(scpnt);
103 return 0; 96 return 0;
104 } 97 }
105 98
106 status = atomic_read(&unit->status); 99 status = atomic_read(&zfcp_sdev->status);
107 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && 100 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
108 !(atomic_read(&unit->port->status) & 101 !(atomic_read(&zfcp_sdev->port->status) &
109 ZFCP_STATUS_COMMON_ERP_FAILED)) { 102 ZFCP_STATUS_COMMON_ERP_FAILED)) {
110 /* only unit access denied, but port is good 103 /* only LUN access denied, but port is good
111 * not covered by FC transport, have to fail here */ 104 * not covered by FC transport, have to fail here */
112 zfcp_scsi_command_fail(scpnt, DID_ERROR); 105 zfcp_scsi_command_fail(scpnt, DID_ERROR);
113 return 0; 106 return 0;
@@ -115,8 +108,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
115 108
116 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { 109 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
117 /* This could be either 110 /* This could be either
118 * open unit pending: this is temporary, will result in 111 * open LUN pending: this is temporary, will result in
119 * open unit or ERP_FAILED, so retry command 112 * open LUN or ERP_FAILED, so retry command
120 * call to rport_delete pending: mimic retry from 113 * call to rport_delete pending: mimic retry from
121 * fc_remote_port_chkready until rport is BLOCKED 114 * fc_remote_port_chkready until rport is BLOCKED
122 */ 115 */
@@ -124,7 +117,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 		return 0;
 	}
 
-	ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
+	ret = zfcp_fsf_fcp_cmnd(scpnt);
 	if (unlikely(ret == -EBUSY))
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	else if (unlikely(ret < 0))
@@ -133,45 +126,43 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 	return ret;
 }
 
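The new prototype follows the host-lock-less queuecommand convention: the midlayer passes the Scsi_Host explicitly, presets scpnt->scsi_done, and no longer hands in a done() callback. An illustrative stub of that contract (not zfcp code; the helper name is made up):

static int example_queuecommand(struct Scsi_Host *shost,
				struct scsi_cmnd *scpnt)
{
	if (low_level_queue_full(shost))	 /* hypothetical helper */
		return SCSI_MLQUEUE_DEVICE_BUSY; /* midlayer retries later */

	/* command accepted; completion is asynchronous and ends with
	 * scpnt->scsi_done(scpnt) from the interrupt path */
	return 0;
}
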
-static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
-					  unsigned int id, u64 lun)
+static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 {
-	unsigned long flags;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) sdev->host->hostdata[0];
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_port *port;
-	struct zfcp_unit *unit = NULL;
+	struct zfcp_unit *unit;
+	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
 
-	read_lock_irqsave(&adapter->port_list_lock, flags);
-	list_for_each_entry(port, &adapter->port_list, list) {
-		if (!port->rport || (id != port->rport->scsi_target_id))
-			continue;
-		unit = zfcp_get_unit_by_lun(port, lun);
-		if (unit)
-			break;
-	}
-	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+	if (!port)
+		return -ENXIO;
 
-	return unit;
-}
+	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
+	if (unit)
+		put_device(&unit->dev);
 
-static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
-{
-	struct zfcp_adapter *adapter;
-	struct zfcp_unit *unit;
-	u64 lun;
+	if (!unit && !(allow_lun_scan && npiv)) {
+		put_device(&port->dev);
+		return -ENXIO;
+	}
 
-	adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
-	if (!adapter)
-		goto out;
+	zfcp_sdev->port = port;
+	zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
+	spin_lock_init(&zfcp_sdev->latencies.lock);
 
-	int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun);
-	unit = zfcp_unit_lookup(adapter, sdp->id, lun);
-	if (unit) {
-		sdp->hostdata = unit;
-		unit->device = sdp;
-		return 0;
-	}
-out:
-	return -ENXIO;
+	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
+	zfcp_erp_wait(port->adapter);
+
+	return 0;
 }
 
 static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
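
slave_alloc consults allow_lun_scan together with the NPIV feature bit: only an NPIV-mode adapter may attach LUNs that were not configured manually. The parameter itself is defined elsewhere in zfcp_scsi.c; a sketch of the assumed definition:

static bool allow_lun_scan = 1;	/* assumed default: scanning allowed */
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");

The 0xFFFFFFFF latency minima above simply guarantee that the first measured sample becomes the running minimum.
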
@@ -179,7 +170,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	struct Scsi_Host *scsi_host = scpnt->device->host;
 	struct zfcp_adapter *adapter =
 		(struct zfcp_adapter *) scsi_host->hostdata[0];
-	struct zfcp_unit *unit = scpnt->device->hostdata;
 	struct zfcp_fsf_req *old_req, *abrt_req;
 	unsigned long flags;
 	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
@@ -193,8 +183,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
 	if (!old_req) {
 		write_unlock_irqrestore(&adapter->abort_lock, flags);
-		zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
-				    old_reqid);
+		zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
 		return FAILED; /* completion could be in progress */
 	}
 	old_req->data = NULL;
@@ -203,49 +192,52 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	write_unlock_irqrestore(&adapter->abort_lock, flags);
 
 	while (retry--) {
-		abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit);
+		abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
 		if (abrt_req)
 			break;
 
 		zfcp_erp_wait(adapter);
 		ret = fc_block_scsi_eh(scpnt);
-		if (ret)
+		if (ret) {
+			zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
 			return ret;
+		}
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
-					    old_reqid);
+			zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
 			return SUCCESS;
 		}
 	}
-	if (!abrt_req)
+	if (!abrt_req) {
+		zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
 		return FAILED;
+	}
 
 	wait_for_completion(&abrt_req->completion);
 
 	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
-		dbf_tag = "okay";
+		dbf_tag = "abrt_ok";
 	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
-		dbf_tag = "lte2";
+		dbf_tag = "abrt_nn";
 	else {
-		dbf_tag = "fail";
+		dbf_tag = "abrt_fa";
 		retval = FAILED;
 	}
-	zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid);
+	zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
 	zfcp_fsf_req_free(abrt_req);
 	return retval;
 }
 
 static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 {
-	struct zfcp_unit *unit = scpnt->device->hostdata;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	struct zfcp_fsf_req *fsf_req = NULL;
 	int retval = SUCCESS, ret;
 	int retry = 3;
 
 	while (retry--) {
-		fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags);
+		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
 		if (fsf_req)
 			break;
 
@@ -256,7 +248,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
+			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
 			return SUCCESS;
 		}
 	}
@@ -266,10 +258,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 	wait_for_completion(&fsf_req->completion);
 
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
-		zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
+		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
 		retval = FAILED;
 	} else
-		zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
+		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
 
 	zfcp_fsf_req_free(fsf_req);
 	return retval;
@@ -287,11 +279,11 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
 
 static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 {
-	struct zfcp_unit *unit = scpnt->device->hostdata;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret;
 
-	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
+	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
 	ret = fc_block_scsi_eh(scpnt);
 	if (ret)
@@ -300,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 	return SUCCESS;
 }
 
-int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+	.module			 = THIS_MODULE,
+	.name			 = "zfcp",
+	.queuecommand		 = zfcp_scsi_queuecommand,
+	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
+	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
+	.slave_alloc		 = zfcp_scsi_slave_alloc,
+	.slave_configure	 = zfcp_scsi_slave_configure,
+	.slave_destroy		 = zfcp_scsi_slave_destroy,
+	.change_queue_depth	 = zfcp_scsi_change_queue_depth,
+	.proc_name		 = "zfcp",
+	.can_queue		 = 4096,
+	.this_id		 = -1,
+	.sg_tablesize		 = ZFCP_QDIO_MAX_SBALES_PER_REQ,
+	.max_sectors		 = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
+	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
+	.cmd_per_lun		 = 1,
+	.use_clustering		 = 1,
+	.shost_attrs		 = zfcp_sysfs_shost_attrs,
+	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
+};
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
 {
 	struct ccw_dev_id dev_id;
 
@@ -309,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 
 	ccw_device_get_id(adapter->ccw_device, &dev_id);
 	/* register adapter as SCSI host with mid layer of SCSI stack */
-	adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
+	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
 					     sizeof (struct zfcp_adapter *));
 	if (!adapter->scsi_host) {
 		dev_err(&adapter->ccw_device->dev,
@@ -319,12 +341,12 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 	}
 
 	/* tell the SCSI stack some characteristics of this adapter */
-	adapter->scsi_host->max_id = 1;
-	adapter->scsi_host->max_lun = 1;
+	adapter->scsi_host->max_id = 511;
+	adapter->scsi_host->max_lun = 0xFFFFFFFF;
 	adapter->scsi_host->max_channel = 0;
 	adapter->scsi_host->unique_id = dev_id.devno;
 	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
-	adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
+	adapter->scsi_host->transportt = zfcp_scsi_transport_template;
 
 	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
 
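
Raising max_lun to 0xFFFFFFFF admits the full flat LUN space that the midlayer derives from the 8-byte FCP LUN. An illustrative round trip with the stock conversion helpers (example only, not zfcp code):

#include <scsi/scsi_device.h>

static unsigned int example_lun_round_trip(unsigned int lun)
{
	struct scsi_lun fcp_lun;

	int_to_scsilun(lun, &fcp_lun);		/* midlayer LUN -> FCP LUN */
	return scsilun_to_int(&fcp_lun);	/* FCP LUN -> midlayer LUN */
}
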
@@ -336,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 	return 0;
 }
 
-void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
 {
 	struct Scsi_Host *shost;
 	struct zfcp_port *port;
@@ -354,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
 	scsi_remove_host(shost);
 	scsi_host_put(shost);
 	adapter->scsi_host = NULL;
-
-	return;
 }
 
 static struct fc_host_statistics*
@@ -529,25 +553,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 
 	if (port) {
-		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL);
+		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
 		put_device(&port->dev);
 	}
 }
 
-static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
-{
-	struct zfcp_unit *unit;
-
-	read_lock_irq(&port->unit_list_lock);
-	list_for_each_entry(unit, &port->unit_list, list) {
-		get_device(&unit->dev);
-		if (scsi_queue_work(port->adapter->scsi_host,
-				    &unit->scsi_work) <= 0)
-			put_device(&unit->dev);
-	}
-	read_unlock_irq(&port->unit_list_lock);
-}
-
 static void zfcp_scsi_rport_register(struct zfcp_port *port)
 {
 	struct fc_rport_identifiers ids;
@@ -574,7 +584,7 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
 	port->rport = rport;
 	port->starget_id = rport->scsi_target_id;
 
-	zfcp_scsi_queue_unit_register(port);
+	zfcp_unit_queue_scsi_scan(port);
 }
 
 static void zfcp_scsi_rport_block(struct zfcp_port *port)
@@ -638,29 +648,6 @@ void zfcp_scsi_rport_work(struct work_struct *work)
 }
 
 /**
- * zfcp_scsi_scan - Register LUN with SCSI midlayer
- * @unit: The LUN/unit to register
- */
-void zfcp_scsi_scan(struct zfcp_unit *unit)
-{
-	struct fc_rport *rport = unit->port->rport;
-
-	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
-		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
-				 scsilun_to_int((struct scsi_lun *)
-						&unit->fcp_lun), 0);
-}
-
-void zfcp_scsi_scan_work(struct work_struct *work)
-{
-	struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
-					      scsi_work);
-
-	zfcp_scsi_scan(unit);
-	put_device(&unit->dev);
-}
-
-/**
  * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
  * @adapter: The adapter where to configure DIF/DIX for the SCSI host
  */
@@ -681,6 +668,7 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
 	    adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
 		mask |= SHOST_DIX_TYPE1_PROTECTION;
 		scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
+		shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
 		shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
 		shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
 	}
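
The added sg_prot_tablesize reserves half of the scatter-gather entries for DIX protection data. For orientation, the generic midlayer announcement pattern this hunk extends looks roughly like this (sketch, not the complete zfcp logic):

unsigned int mask = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION;

scsi_host_set_prot(shost, mask);		/* supported DIF/DIX types */
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);	/* IP checksum guard tags */
shost->sg_prot_tablesize = shost->sg_tablesize / 2; /* SG room for DI data */
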
@@ -732,34 +720,8 @@ struct fc_function_template zfcp_transport_functions = {
 	/* no functions registered for following dynamic attributes but
 	   directly set by LLDD */
 	.show_host_port_type = 1,
+	.show_host_symbolic_name = 1,
 	.show_host_speed = 1,
 	.show_host_port_id = 1,
-	.disable_target_scan = 1,
 	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
 };
-
-struct zfcp_data zfcp_data = {
-	.scsi_host_template = {
-		.name			 = "zfcp",
-		.module			 = THIS_MODULE,
-		.proc_name		 = "zfcp",
-		.change_queue_depth	 = zfcp_scsi_change_queue_depth,
-		.slave_alloc		 = zfcp_scsi_slave_alloc,
-		.slave_configure	 = zfcp_scsi_slave_configure,
-		.slave_destroy		 = zfcp_scsi_slave_destroy,
-		.queuecommand		 = zfcp_scsi_queuecommand,
-		.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
-		.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
-		.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
-		.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
-		.can_queue		 = 4096,
-		.this_id		 = -1,
-		.sg_tablesize		 = ZFCP_QDIO_MAX_SBALES_PER_REQ,
-		.cmd_per_lun		 = 1,
-		.use_clustering		 = 1,
-		.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
-		.max_sectors		 = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
-		.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
-		.shost_attrs		 = zfcp_sysfs_shost_attrs,
-	},
-};
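
With zfcp_data gone, the host and transport templates are plain file-scope objects. The registration flow that consumes them keeps the usual shape; a trimmed sketch under that assumption (error handling omitted, parent_dev is a placeholder):

struct Scsi_Host *shost;

shost = scsi_host_alloc(&zfcp_scsi_host_template,
			sizeof(struct zfcp_adapter *));
if (shost) {
	shost->transportt = zfcp_scsi_transport_template;
	if (scsi_add_host(shost, parent_dev))	/* parent_dev: assumed */
		scsi_host_put(shost);
}
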
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index b4561c86e230..cdc4ff78a7ba 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -68,63 +68,95 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
 		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
 
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
-		 atomic_read(&unit->status));
+		 zfcp_unit_sdev_status(unit));
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
-		 (atomic_read(&unit->status) &
+		 (zfcp_unit_sdev_status(unit) &
 		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
-		 (atomic_read(&unit->status) &
+		 (zfcp_unit_sdev_status(unit) &
 		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
-		 (atomic_read(&unit->status) &
-		  ZFCP_STATUS_UNIT_SHARED) != 0);
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_LUN_SHARED) != 0);
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
-		 (atomic_read(&unit->status) &
-		  ZFCP_STATUS_UNIT_READONLY) != 0);
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_LUN_READONLY) != 0);
 
-#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \
-static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev,	\
-						struct device_attribute *attr, \
-						char *buf)		\
-{									\
-	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
-									\
-	if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
-		return sprintf(buf, "1\n");				\
-	else								\
-		return sprintf(buf, "0\n");				\
-}									\
-static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev,	\
-						 struct device_attribute *attr,\
-						 const char *buf, size_t count)\
-{									\
-	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
-	unsigned long val;						\
-	int retval = 0;							\
-									\
-	if (!(_feat && get_device(&_feat->dev)))			\
-		return -EBUSY;						\
-									\
-	if (strict_strtoul(buf, 0, &val) || val != 0) {			\
-		retval = -EINVAL;					\
-		goto out;						\
-	}								\
-									\
-	zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL,		\
-					 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
-	zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED,	\
-				  _reopen_id, NULL);			\
-	zfcp_erp_wait(_adapter);					\
-out:									\
-	put_device(&_feat->dev);					\
-	return retval ? retval : (ssize_t) count;			\
-}									\
-static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO,			\
-		     zfcp_sysfs_##_feat##_failed_show,			\
-		     zfcp_sysfs_##_feat##_failed_store);
+static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		return sprintf(buf, "1\n");
+
+	return sprintf(buf, "0\n");
+}
+
+static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
+	zfcp_erp_wait(port->adapter);
 
-ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
-ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
+	return count;
+}
+static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_port_failed_show,
+		     zfcp_sysfs_port_failed_store);
+
+static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+	struct scsi_device *sdev;
+	unsigned int status, failed = 1;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		status = atomic_read(&sdev_to_zfcp(sdev)->status);
+		failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
+		scsi_device_put(sdev);
+	}
+
+	return sprintf(buf, "%d\n", failed);
+}
+
+static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+	unsigned long val;
+	struct scsi_device *sdev;
+
+	if (strict_strtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+				    "syufai2");
+		zfcp_erp_wait(unit->port->adapter);
+	} else
+		zfcp_unit_scsi_scan(unit);
+
+	return count;
+}
+static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_unit_failed_show,
+		     zfcp_sysfs_unit_failed_store);
 
 static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
 					      struct device_attribute *attr,
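
The open-coded show/store pairs above still rely on the ZFCP_DEV_ATTR wrapper. A sketch of the assumed macro, defined earlier in zfcp_sysfs.c:

#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store)		\
struct device_attribute dev_attr_##_feat##_##_name =			\
	__ATTR(_name, _mode, _show, _store)
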
@@ -163,10 +195,9 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
 		goto out;
 	}
 
-	zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL,
-				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"syafai2", NULL);
+				"syafai2");
 	zfcp_erp_wait(adapter);
 out:
 	zfcp_ccw_adapter_put(adapter);
@@ -224,7 +255,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 
 	put_device(&port->dev);
 
-	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
+	zfcp_erp_port_shutdown(port, 0, "syprs_1");
 	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 out:
 	zfcp_ccw_adapter_put(adapter);
@@ -257,28 +288,15 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 				 const char *buf, size_t count)
 {
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
-	struct zfcp_unit *unit;
 	u64 fcp_lun;
-	int retval = -EINVAL;
-
-	if (!(port && get_device(&port->dev)))
-		return -EBUSY;
 
 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
-		goto out;
+		return -EINVAL;
 
-	unit = zfcp_unit_enqueue(port, fcp_lun);
-	if (IS_ERR(unit))
-		goto out;
-	else
-		retval = 0;
+	if (zfcp_unit_add(port, fcp_lun))
+		return -EINVAL;
 
-	zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
-	zfcp_erp_wait(unit->port->adapter);
-	zfcp_scsi_scan(unit);
-out:
-	put_device(&port->dev);
-	return retval ? retval : (ssize_t) count;
+	return count;
 }
 static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
 
@@ -287,42 +305,15 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
 				    const char *buf, size_t count)
 {
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
-	struct zfcp_unit *unit;
 	u64 fcp_lun;
-	int retval = -EINVAL;
-	struct scsi_device *sdev;
-
-	if (!(port && get_device(&port->dev)))
-		return -EBUSY;
 
 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
-		goto out;
+		return -EINVAL;
 
-	unit = zfcp_get_unit_by_lun(port, fcp_lun);
-	if (!unit)
-		goto out;
-	else
-		retval = 0;
-
-	sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
-				  port->starget_id,
-				  scsilun_to_int((struct scsi_lun *)&fcp_lun));
-	if (sdev) {
-		scsi_remove_device(sdev);
-		scsi_device_put(sdev);
-	}
-
-	write_lock_irq(&port->unit_list_lock);
-	list_del(&unit->list);
-	write_unlock_irq(&port->unit_list_lock);
-
-	put_device(&unit->dev);
+	if (zfcp_unit_remove(port, fcp_lun))
+		return -EINVAL;
 
-	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
-	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
-out:
-	put_device(&port->dev);
-	return retval ? retval : (ssize_t) count;
+	return count;
 }
 static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
 
@@ -363,9 +354,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
 		       struct device_attribute *attr,			\
 		       char *buf) {					\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
-	struct zfcp_unit *unit = sdev->hostdata;			\
-	struct zfcp_latencies *lat = &unit->latencies;			\
-	struct zfcp_adapter *adapter = unit->port->adapter;		\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_latencies *lat = &zfcp_sdev->latencies;		\
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;	\
 	unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc;	\
 									\
 	spin_lock_bh(&lat->lock);					\
@@ -394,8 +385,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
 			 const char *buf, size_t count)			\
 {									\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
-	struct zfcp_unit *unit = sdev->hostdata;			\
-	struct zfcp_latencies *lat = &unit->latencies;			\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_latencies *lat = &zfcp_sdev->latencies;		\
 	unsigned long flags;						\
 									\
 	spin_lock_irqsave(&lat->lock, flags);				\
@@ -423,19 +414,28 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
 					      struct device_attribute *attr,\
 					      char *buf)		\
 {									\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
-	struct zfcp_unit *unit = sdev->hostdata;			\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_port *port = zfcp_sdev->port;			\
 									\
 	return sprintf(buf, _format, _value);				\
 }									\
 static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
 
 ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
-		      dev_name(&unit->port->adapter->ccw_device->dev));
+		      dev_name(&port->adapter->ccw_device->dev));
 ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
-		      (unsigned long long) unit->port->wwpn);
-ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
-		      (unsigned long long) unit->fcp_lun);
+		      (unsigned long long) port->wwpn);
+
+static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
+}
+static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
 
 struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
 	&dev_attr_fcp_lun,
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
new file mode 100644
index 000000000000..20796ebc33ce
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -0,0 +1,244 @@
+/*
+ * zfcp device driver
+ *
+ * Tracking of manually configured LUNs and helper functions to
+ * register the LUNs with the SCSI midlayer.
+ *
+ * Copyright IBM Corporation 2010
+ */
+
+#include "zfcp_def.h"
+#include "zfcp_ext.h"
+
+/**
+ * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
+ * @unit: The zfcp LUN/unit to register
+ *
+ * When the SCSI midlayer is not allowed to automatically scan and
+ * attach SCSI devices, zfcp has to register the single devices with
+ * the SCSI midlayer.
+ */
+void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
+{
+	struct fc_rport *rport = unit->port->rport;
+	unsigned int lun;
+
+	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+
+	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
+		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
+}
+
+static void zfcp_unit_scsi_scan_work(struct work_struct *work)
+{
+	struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
+					      scsi_work);
+
+	zfcp_unit_scsi_scan(unit);
+	put_device(&unit->dev);
+}
+
+/**
+ * zfcp_unit_queue_scsi_scan - Register configured units on port
+ * @port: The zfcp_port where to register units
+ *
+ * After opening a port, all units configured on this port have to be
+ * registered with the SCSI midlayer. This function should be called
+ * after calling fc_remote_port_add, so that the fc_rport is already
+ * ONLINE and the call to scsi_scan_target runs the same way as the
+ * call in the FC transport class.
+ */
+void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
+{
+	struct zfcp_unit *unit;
+
+	read_lock_irq(&port->unit_list_lock);
+	list_for_each_entry(unit, &port->unit_list, list) {
+		get_device(&unit->dev);
+		if (scsi_queue_work(port->adapter->scsi_host,
+				    &unit->scsi_work) <= 0)
+			put_device(&unit->dev);
+	}
+	read_unlock_irq(&port->unit_list_lock);
+}
+
+static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	list_for_each_entry(unit, &port->unit_list, list)
+		if (unit->fcp_lun == fcp_lun) {
+			get_device(&unit->dev);
+			return unit;
+		}
+
+	return NULL;
+}
+
+/**
+ * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
+ * @port: zfcp_port where to look for the unit
+ * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
+ *
+ * If zfcp_unit is found, a reference is acquired that has to be
+ * released later.
+ *
+ * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
+ * with the specified FCP LUN.
+ */
+struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	read_lock_irq(&port->unit_list_lock);
+	unit = _zfcp_unit_find(port, fcp_lun);
+	read_unlock_irq(&port->unit_list_lock);
+	return unit;
+}
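
As the kernel-doc above states, a hit from zfcp_unit_find() carries a device reference. A short usage sketch of the required balance:

struct zfcp_unit *unit;

unit = zfcp_unit_find(port, fcp_lun);
if (unit) {
	/* ... use unit; the reference pins it against release ... */
	put_device(&unit->dev);
}
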
+
+/**
+ * zfcp_unit_release - Drop reference to zfcp_port and free memory of zfcp_unit.
+ * @dev: pointer to device in zfcp_unit
+ */
+static void zfcp_unit_release(struct device *dev)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+
+	put_device(&unit->port->dev);
+	kfree(unit);
+}
+
+/**
+ * zfcp_unit_add - add unit to unit list of a port.
+ * @port: pointer to port where unit is added
+ * @fcp_lun: FCP LUN of unit to be added
+ * Returns: 0 on success
+ *
+ * Sets up some unit internal structures and creates sysfs entry.
+ */
+int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	unit = zfcp_unit_find(port, fcp_lun);
+	if (unit) {
+		put_device(&unit->dev);
+		return -EEXIST;
+	}
+
+	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
+	if (!unit)
+		return -ENOMEM;
+
+	unit->port = port;
+	unit->fcp_lun = fcp_lun;
+	unit->dev.parent = &port->dev;
+	unit->dev.release = zfcp_unit_release;
+	INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
+
+	if (dev_set_name(&unit->dev, "0x%016llx",
+			 (unsigned long long) fcp_lun)) {
+		kfree(unit);
+		return -ENOMEM;
+	}
+
+	get_device(&port->dev);
+
+	if (device_register(&unit->dev)) {
+		put_device(&unit->dev);
+		return -ENOMEM;
+	}
+
+	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
+		device_unregister(&unit->dev);
+		return -EINVAL;
+	}
+
+	write_lock_irq(&port->unit_list_lock);
+	list_add_tail(&unit->list, &port->unit_list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	zfcp_unit_scsi_scan(unit);
+
+	return 0;
+}
+
+/**
+ * zfcp_unit_sdev - Return SCSI device for zfcp_unit
+ * @unit: The zfcp_unit to get the SCSI device for
+ *
+ * Returns: scsi_device pointer on success, NULL if there is no SCSI
+ * device for this zfcp_unit
+ *
+ * On success, the caller also holds a reference to the SCSI device
+ * that must be released with scsi_device_put.
+ */
+struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_port *port;
+	unsigned int lun;
+
+	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+	port = unit->port;
+	shost = port->adapter->scsi_host;
+	return scsi_device_lookup(shost, 0, port->starget_id, lun);
+}
+
+/**
+ * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
+ * @unit: The unit to lookup the SCSI device for
+ *
+ * Returns the zfcp LUN status field of the SCSI device if the SCSI device
+ * for the zfcp_unit exists, 0 otherwise.
+ */
+unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
+{
+	unsigned int status = 0;
+	struct scsi_device *sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		status = atomic_read(&zfcp_sdev->status);
+		scsi_device_put(sdev);
+	}
+
+	return status;
+}
+
+/**
+ * zfcp_unit_remove - Remove entry from list of configured units
+ * @port: The port where to remove the unit from the configuration
+ * @fcp_lun: The 64 bit LUN of the unit to remove
+ *
+ * Returns: -EINVAL if a unit with the specified LUN does not exist,
+ * 0 on success.
+ */
+int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
+
+	write_lock_irq(&port->unit_list_lock);
+	unit = _zfcp_unit_find(port, fcp_lun);
+	if (unit)
+		list_del(&unit->list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	if (!unit)
+		return -EINVAL;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
+	}
+
+	put_device(&unit->dev);
+
+	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
+
+	return 0;
+}
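
Taken together, the new file gives the ccw and sysfs code a small add/remove API for manually configured LUNs. An end-to-end usage sketch (the LUN value is an example):

u64 fcp_lun = 0x4010400000000000ULL;	/* example FCP LUN */

if (zfcp_unit_add(port, fcp_lun) == 0) {
	/* unit is registered in sysfs and scanned by the midlayer */
	zfcp_unit_remove(port, fcp_lun); /* detaches sdev, drops unit */
}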