Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/Kconfig           |  24
-rw-r--r--  drivers/s390/block/dasd.c            | 314
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c   |  16
-rw-r--r--  drivers/s390/block/dasd_devmap.c     | 155
-rw-r--r--  drivers/s390/block/dasd_diag.c       |   3
-rw-r--r--  drivers/s390/block/dasd_eckd.c       | 664
-rw-r--r--  drivers/s390/block/dasd_eckd.h       |  17
-rw-r--r--  drivers/s390/block/dasd_eer.c        |   1
-rw-r--r--  drivers/s390/block/dasd_erp.c        |   3
-rw-r--r--  drivers/s390/block/dasd_fba.c        |  21
-rw-r--r--  drivers/s390/block/dasd_int.h        |  35
-rw-r--r--  drivers/s390/char/Kconfig            |  69
-rw-r--r--  drivers/s390/char/con3215.c          |   2
-rw-r--r--  drivers/s390/char/raw3270.c          |   2
-rw-r--r--  drivers/s390/char/sclp.c             |  18
-rw-r--r--  drivers/s390/char/sclp_config.c      |   1
-rw-r--r--  drivers/s390/char/tape_core.c        |   2
-rw-r--r--  drivers/s390/char/vmur.c             |   2
-rw-r--r--  drivers/s390/cio/ccwgroup.c          |  78
-rw-r--r--  drivers/s390/cio/chsc.c              |  19
-rw-r--r--  drivers/s390/cio/chsc.h              |  18
-rw-r--r--  drivers/s390/cio/css.c               |   8
-rw-r--r--  drivers/s390/cio/device_ops.c        |  40
-rw-r--r--  drivers/s390/cio/itcw.c              |  62
-rw-r--r--  drivers/s390/cio/qdio.h              |  31
-rw-r--r--  drivers/s390/cio/qdio_debug.c        |   1
-rw-r--r--  drivers/s390/cio/qdio_main.c         | 177
-rw-r--r--  drivers/s390/cio/qdio_setup.c        |  20
-rw-r--r--  drivers/s390/cio/qdio_thinint.c      |  56
-rw-r--r--  drivers/s390/crypto/ap_bus.c         |  67
-rw-r--r--  drivers/s390/crypto/ap_bus.h         |   2
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c     |  12
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h     |   1
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c   |  82
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.h   |  25
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c   |   1
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c   |   1
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c  |  17
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c        |   3
-rw-r--r--  drivers/s390/net/Kconfig             |  51
-rw-r--r--  drivers/s390/net/claw.c              |   2
-rw-r--r--  drivers/s390/net/ctcm_main.c         |   2
-rw-r--r--  drivers/s390/net/lcs.c               |   2
-rw-r--r--  drivers/s390/net/qeth_core_main.c    |   2
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c        |   2
45 files changed, 1607 insertions(+), 524 deletions(-)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 07883197f474..8e477bb1f3f6 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -2,7 +2,8 @@ comment "S/390 block device drivers"
 	depends on S390 && BLOCK
 
 config BLK_DEV_XPRAM
-	tristate "XPRAM disk support"
+	def_tristate m
+	prompt "XPRAM disk support"
 	depends on S390 && BLOCK
 	help
 	  Select this option if you want to use your expanded storage on S/390
@@ -12,13 +13,15 @@ config BLK_DEV_XPRAM
 	  xpram. If unsure, say "N".
 
 config DCSSBLK
-	tristate "DCSSBLK support"
+	def_tristate m
+	prompt "DCSSBLK support"
 	depends on S390 && BLOCK
 	help
 	  Support for dcss block device
 
 config DASD
-	tristate "Support for DASD devices"
+	def_tristate y
+	prompt "Support for DASD devices"
 	depends on CCW && BLOCK
 	select IOSCHED_DEADLINE
 	help
@@ -27,28 +30,32 @@ config DASD
 	  natively on a single image or an LPAR.
 
 config DASD_PROFILE
-	bool "Profiling support for dasd devices"
+	def_bool y
+	prompt "Profiling support for dasd devices"
 	depends on DASD
 	help
 	  Enable this option if you want to see profiling information
 	  in /proc/dasd/statistics.
 
 config DASD_ECKD
-	tristate "Support for ECKD Disks"
+	def_tristate y
+	prompt "Support for ECKD Disks"
 	depends on DASD
 	help
 	  ECKD devices are the most commonly used devices. You should enable
 	  this option unless you are very sure to have no ECKD device.
 
 config DASD_FBA
-	tristate "Support for FBA Disks"
+	def_tristate y
+	prompt "Support for FBA Disks"
 	depends on DASD
 	help
 	  Select this option to be able to access FBA devices. It is safe to
 	  say "Y".
 
 config DASD_DIAG
-	tristate "Support for DIAG access to Disks"
+	def_tristate y
+	prompt "Support for DIAG access to Disks"
 	depends on DASD
 	help
 	  Select this option if you want to use Diagnose250 command to access
@@ -56,7 +63,8 @@ config DASD_DIAG
 	  say "N".
 
 config DASD_EER
-	bool "Extended error reporting (EER)"
+	def_bool y
+	prompt "Extended error reporting (EER)"
 	depends on DASD
 	help
 	  This driver provides a character device interface to the
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fb613d70c2cb..794bfd962266 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -11,6 +11,7 @@
 #define KMSG_COMPONENT "dasd"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/kmod.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -368,6 +369,11 @@ dasd_state_ready_to_online(struct dasd_device * device)
 	device->state = DASD_STATE_ONLINE;
 	if (device->block) {
 		dasd_schedule_block_bh(device->block);
+		if ((device->features & DASD_FEATURE_USERAW)) {
+			disk = device->block->gdp;
+			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+			return 0;
+		}
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
@@ -393,7 +399,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
 		return rc;
 	}
 	device->state = DASD_STATE_READY;
-	if (device->block) {
+	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
@@ -744,10 +750,6 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 	char *data;
 	int size;
 
-	/* Sanity checks */
-	BUG_ON(datasize > PAGE_SIZE ||
-	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
 	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
 	if (cplength > 0)
 		size += cplength * sizeof(struct ccw1);
@@ -853,7 +855,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
 	rc = ccw_device_clear(device->cdev, (long) cqr);
 	switch (rc) {
 	case 0:	/* termination successful */
-		cqr->retries--;
 		cqr->status = DASD_CQR_CLEAR_PENDING;
 		cqr->stopclk = get_clock();
 		cqr->starttime = 0;
@@ -905,6 +906,16 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		return rc;
 	}
 	device = (struct dasd_device *) cqr->startdev;
+	if (((cqr->block &&
+	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
+	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
+			      "because of stolen lock", cqr);
+		cqr->status = DASD_CQR_ERROR;
+		cqr->intrc = -EPERM;
+		return -EPERM;
+	}
 	if (cqr->retries < 0) {
 		/* internal error 14 - start_IO run out of retries */
 		sprintf(errorstring, "14 %p", cqr);
@@ -916,6 +927,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 	cqr->startclk = get_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
+	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+		cqr->lpm &= device->path_data.opm;
+		if (!cqr->lpm)
+			cqr->lpm = device->path_data.opm;
+	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
 					 (long) cqr, cqr->lpm);
@@ -928,35 +944,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		cqr->status = DASD_CQR_IN_IO;
 		break;
 	case -EBUSY:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: device busy, retry later");
 		break;
 	case -ETIMEDOUT:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: request timeout, retry later");
 		break;
	case -EACCES:
-		/* -EACCES indicates that the request used only a
-		 * subset of the available pathes and all these
-		 * pathes are gone.
-		 * Do a retry with all available pathes.
+		/* -EACCES indicates that the request used only a subset of the
+		 * available paths and all these paths are gone. If the lpm of
+		 * this request was only a subset of the opm (e.g. the ppm) then
+		 * we just do a retry with all available paths.
+		 * If we already use the full opm, something is amiss, and we
+		 * need a full path verification.
 		 */
-		cqr->lpm = LPM_ANYPATH;
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
-			      "start_IO: selected pathes gone,"
-			      " retry on all pathes");
+		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device,
+				      "start_IO: selected paths gone (%x)",
+				      cqr->lpm);
+		} else if (cqr->lpm != device->path_data.opm) {
+			cqr->lpm = device->path_data.opm;
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				      "start_IO: selected paths gone,"
+				      " retry on all paths");
+		} else {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "start_IO: all paths in opm gone,"
+				      " do path verification");
+			dasd_generic_last_path_gone(device);
+			device->path_data.opm = 0;
+			device->path_data.ppm = 0;
+			device->path_data.npm = 0;
+			device->path_data.tbvpm =
+				ccw_device_get_path_mask(device->cdev);
+		}
 		break;
 	case -ENODEV:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -ENODEV device gone, retry");
 		break;
 	case -EIO:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EIO device gone, retry");
 		break;
 	case -EINVAL:
 		/* most likely caused in power management context */
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EINVAL device currently "
 			      "not accessible");
 		break;
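A note on the path masks used above: lpm, opm, ppm and npm are 8-bit channel-path masks, one bit per path. The -EACCES branch distinguishes a request that ran on only a subset of the operational paths (widen the mask and retry) from one that already used every operational path (trigger a full path verification). A minimal standalone sketch of that decision, with illustrative names only, not the kernel's:

    #include <stdio.h>

    /* Simplified model of the -EACCES handling in dasd_start_IO:
     * retry on all operational paths if the request used only a
     * subset of them, otherwise start a full path verification. */
    enum eacces_action { RETRY_ALL_PATHS, VERIFY_PATHS };

    static enum eacces_action on_eacces(unsigned char lpm, unsigned char opm)
    {
        if (lpm != opm)
            return RETRY_ALL_PATHS; /* widen lpm to opm and retry */
        return VERIFY_PATHS;        /* every known path failed */
    }

    int main(void)
    {
        /* request used path 0x80; paths 0x80 and 0x40 operational */
        printf("%d\n", on_eacces(0x80, 0x80 | 0x40)); /* 0: retry */
        printf("%d\n", on_eacces(0xc0, 0xc0));        /* 1: verify */
        return 0;
    }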
@@ -1076,6 +1110,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	unsigned long long now;
 	int expires;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
 	if (IS_ERR(irb)) {
 		switch (PTR_ERR(irb)) {
 		case -EIO:
@@ -1094,16 +1129,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	}
 
 	now = get_clock();
-
-	/* check for unsolicited interrupts */
 	cqr = (struct dasd_ccw_req *) intparm;
-	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
-		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
-		     ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) ||
-		      (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND |
-						  SCSW_STCTL_ALERT_STATUS))))) {
-		if (cqr && cqr->status == DASD_CQR_IN_IO)
-			cqr->status = DASD_CQR_QUEUED;
+	/* check for conditions that should be handled immediately */
+	if (!cqr ||
+	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	      scsw_cstat(&irb->scsw) == 0)) {
 		if (cqr)
 			memcpy(&cqr->irb, irb, sizeof(*irb));
 		device = dasd_device_from_cdev_locked(cdev);
@@ -1114,17 +1144,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 			dasd_put_device(device);
 			return;
 		}
-		device->discipline->dump_sense_dbf(device, irb,
-						   "unsolicited");
-		if ((device->features & DASD_FEATURE_ERPLOG))
-			device->discipline->dump_sense(device, cqr,
-						       irb);
-		dasd_device_clear_timer(device);
-		device->discipline->handle_unsolicited_interrupt(device,
-								 irb);
+		device->discipline->dump_sense_dbf(device, irb, "int");
+		if (device->features & DASD_FEATURE_ERPLOG)
+			device->discipline->dump_sense(device, cqr, irb);
+		device->discipline->check_for_device_change(device, cqr, irb);
 		dasd_put_device(device);
-		return;
 	}
+	if (!cqr)
+		return;
 
 	device = (struct dasd_device *) cqr->startdev;
 	if (!device ||
@@ -1164,25 +1191,19 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 					  struct dasd_ccw_req, devlist);
 		}
 	} else {  /* error */
-		memcpy(&cqr->irb, irb, sizeof(struct irb));
-		/* log sense for every failed I/O to s390 debugfeature */
-		dasd_log_sense_dbf(cqr, irb);
-		if (device->features & DASD_FEATURE_ERPLOG) {
-			dasd_log_sense(cqr, irb);
-		}
-
 		/*
 		 * If we don't want complex ERP for this request, then just
 		 * reset this and retry it in the fastpath
 		 */
 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 		    cqr->retries > 0) {
-			if (cqr->lpm == LPM_ANYPATH)
+			if (cqr->lpm == device->path_data.opm)
 				DBF_DEV_EVENT(DBF_DEBUG, device,
 					      "default ERP in fastpath "
 					      "(%i retries left)",
 					      cqr->retries);
-			cqr->lpm = LPM_ANYPATH;
+			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+				cqr->lpm = device->path_data.opm;
 			cqr->status = DASD_CQR_QUEUED;
 			next = cqr;
 		} else
@@ -1210,13 +1231,13 @@ enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
 		goto out;
 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
 	   device->state != device->target ||
-	   !device->discipline->handle_unsolicited_interrupt){
+	   !device->discipline->check_for_device_change){
 		dasd_put_device(device);
 		goto out;
 	}
-
-	dasd_device_clear_timer(device);
-	device->discipline->handle_unsolicited_interrupt(device, irb);
+	if (device->discipline->dump_sense_dbf)
+		device->discipline->dump_sense_dbf(device, irb, "uc");
+	device->discipline->check_for_device_change(device, NULL, irb);
 	dasd_put_device(device);
 out:
 	return UC_TODO_RETRY;
@@ -1366,8 +1387,14 @@ static void __dasd_device_start_head(struct dasd_device *device)
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if (cqr->status != DASD_CQR_QUEUED)
 		return;
-	/* when device is stopped, return request to previous layer */
-	if (device->stopped) {
+	/* when device is stopped, return request to previous layer
+	 * exception: only the disconnect or unresumed bits are set and the
+	 * cqr is a path verification request
+	 */
+	if (device->stopped &&
+	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+		cqr->intrc = -EAGAIN;
 		cqr->status = DASD_CQR_CLEARED;
 		dasd_schedule_device_bh(device);
 		return;
@@ -1383,6 +1410,23 @@ static void __dasd_device_start_head(struct dasd_device *device)
 		dasd_device_set_timer(device, 50);
 }
 
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+	int rc;
+
+	if (device->path_data.tbvpm) {
+		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
+					DASD_UNRESUMED_PM))
+			return;
+		rc = device->discipline->verify_path(
+				device, device->path_data.tbvpm);
+		if (rc)
+			dasd_device_set_timer(device, 50);
+		else
+			device->path_data.tbvpm = 0;
+	}
+};
+
 /*
  * Go through all request on the dasd_device request queue,
  * terminate them on the cdev if necessary, and return them to the
@@ -1457,6 +1501,7 @@ static void dasd_device_tasklet(struct dasd_device *device)
 	__dasd_device_check_expire(device);
 	/* find final requests on ccw queue */
 	__dasd_device_process_ccw_queue(device, &final_queue);
+	__dasd_device_check_path_events(device);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	/* Now call the callback function of requests with final status */
 	__dasd_device_process_final_queue(device, &final_queue);
@@ -1613,7 +1658,12 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 			continue;
 		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
 			continue;
-
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			continue;
+		}
 		/* Non-temporary stop condition will trigger fail fast */
 		if (device->stopped & ~DASD_STOPPED_PENDING &&
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -1621,7 +1671,6 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 			cqr->status = DASD_CQR_FAILED;
 			continue;
 		}
-
 		/* Don't try to start requests if device is stopped */
 		if (interruptible) {
 			rc = wait_event_interruptible(
@@ -1706,13 +1755,18 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 	int rc;
 
 	device = cqr->startdev;
+	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		cqr->status = DASD_CQR_FAILED;
+		cqr->intrc = -EPERM;
+		return -EIO;
+	}
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = _dasd_term_running_cqr(device);
 	if (rc) {
 		spin_unlock_irq(get_ccwdev_lock(device->cdev));
 		return rc;
 	}
-
 	cqr->callback = dasd_wakeup_cb;
 	cqr->callback_data = DASD_SLEEPON_START_TAG;
 	cqr->status = DASD_CQR_QUEUED;
@@ -2016,6 +2070,13 @@ static void __dasd_block_start_head(struct dasd_block *block)
 	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
 		if (cqr->status != DASD_CQR_FILLED)
 			continue;
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
 		/* Non-temporary stop condition will trigger fail fast */
 		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -2201,8 +2262,20 @@ static void dasd_setup_queue(struct dasd_block *block)
 {
 	int max;
 
-	blk_queue_logical_block_size(block->request_queue, block->bp_block);
-	max = block->base->discipline->max_blocks << block->s2b_shift;
+	if (block->base->features & DASD_FEATURE_USERAW) {
+		/*
+		 * the max_blocks value for raw_track access is 256
+		 * it is higher than the native ECKD value because we
+		 * only need one ccw per track
+		 * so the max_hw_sectors are
+		 * 2048 x 512B = 1024kB = 16 tracks
+		 */
+		max = 2048;
+	} else {
+		max = block->base->discipline->max_blocks << block->s2b_shift;
+	}
+	blk_queue_logical_block_size(block->request_queue,
+				     block->bp_block);
 	blk_queue_max_hw_sectors(block->request_queue, max);
 	blk_queue_max_segments(block->request_queue, -1L);
 	/* with page sized segments we can translate each segement into
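The arithmetic in the comment above can be checked directly: max_hw_sectors is counted in 512-byte sectors, and a raw track image is 64 KiB. A small self-contained check (assuming only those two facts from the comment):

    #include <assert.h>

    int main(void)
    {
        const unsigned max_hw_sectors = 2048;        /* raw-access value */
        const unsigned bytes = max_hw_sectors * 512; /* 1024 KiB */
        const unsigned track = 64 * 1024;            /* one raw track image */

        assert(bytes == 1024 * 1024);
        assert(bytes / track == 16); /* 16 tracks per request */
        return 0;
    }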
@@ -2588,10 +2661,53 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	return 0;
 }
 
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	dev_warn(&device->cdev->dev, "No operational channel path is left "
+		 "for the device\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+	/* First of all call extended error reporting. */
+	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+	if (device->state < DASD_STATE_BASIC)
+		return 0;
+	/* Device is active. We want to keep it. */
+	list_for_each_entry(cqr, &device->ccw_queue, devlist)
+		if ((cqr->status == DASD_CQR_IN_IO) ||
+		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+			cqr->status = DASD_CQR_QUEUED;
+			cqr->retries++;
+		}
+	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+	dev_info(&device->cdev->dev, "A channel path to the device has become "
+		 "operational\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	if (device->stopped & DASD_UNRESUMED_PM) {
+		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+		dasd_restore_device(device);
+		return 1;
+	}
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
 int dasd_generic_notify(struct ccw_device *cdev, int event)
 {
 	struct dasd_device *device;
-	struct dasd_ccw_req *cqr;
 	int ret;
 
 	device = dasd_device_from_cdev_locked(cdev);
@@ -2602,41 +2718,64 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 	case CIO_GONE:
 	case CIO_BOXED:
 	case CIO_NO_PATH:
-		/* First of all call extended error reporting. */
-		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-
-		if (device->state < DASD_STATE_BASIC)
-			break;
-		/* Device is active. We want to keep it. */
-		list_for_each_entry(cqr, &device->ccw_queue, devlist)
-			if (cqr->status == DASD_CQR_IN_IO) {
-				cqr->status = DASD_CQR_QUEUED;
-				cqr->retries++;
-			}
-		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
-		dasd_device_clear_timer(device);
-		dasd_schedule_device_bh(device);
-		ret = 1;
+		device->path_data.opm = 0;
+		device->path_data.ppm = 0;
+		device->path_data.npm = 0;
+		ret = dasd_generic_last_path_gone(device);
 		break;
 	case CIO_OPER:
-		/* FIXME: add a sanity check. */
-		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
-		if (device->stopped & DASD_UNRESUMED_PM) {
-			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
-			dasd_restore_device(device);
-			ret = 1;
-			break;
-		}
-		dasd_schedule_device_bh(device);
-		if (device->block)
-			dasd_schedule_block_bh(device->block);
 		ret = 1;
+		if (device->path_data.opm)
+			ret = dasd_generic_path_operational(device);
 		break;
 	}
 	dasd_put_device(device);
 	return ret;
 }
 
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+	int chp;
+	__u8 oldopm, eventlpm;
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return;
+	for (chp = 0; chp < 8; chp++) {
+		eventlpm = 0x80 >> chp;
+		if (path_event[chp] & PE_PATH_GONE) {
+			oldopm = device->path_data.opm;
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			if (oldopm && !device->path_data.opm)
+				dasd_generic_last_path_gone(device);
+		}
+		if (path_event[chp] & PE_PATH_AVAILABLE) {
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			device->path_data.tbvpm |= eventlpm;
+			dasd_schedule_device_bh(device);
+		}
+	}
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	if (!device->path_data.opm && lpm) {
+		device->path_data.opm = lpm;
+		dasd_generic_path_operational(device);
+	} else
+		device->path_data.opm |= lpm;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
+
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
 	struct dasd_ccw_req *cqr, *n;
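For reference, the eventlpm = 0x80 >> chp expression in dasd_generic_path_event encodes the s390 convention that channel path 0 occupies the most significant bit of the 8-bit path mask. A standalone illustration (not kernel code):

    #include <stdio.h>

    int main(void)
    {
        for (int chp = 0; chp < 8; chp++) {
            unsigned char lpm = 0x80 >> chp; /* same mapping as eventlpm */
            printf("chp %d -> mask 0x%02x\n", chp, lpm);
        }
        return 0;
    }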
@@ -2646,6 +2785,10 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 
 	if (IS_ERR(device))
 		return PTR_ERR(device);
+
+	if (device->discipline->freeze)
+		rc = device->discipline->freeze(device);
+
 	/* disallow new I/O */
 	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
 	/* clear active requests */
@@ -2682,9 +2825,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 	list_splice_tail(&freeze_queue, &device->ccw_queue);
 	spin_unlock_irq(get_ccwdev_lock(cdev));
 
-	if (device->discipline->freeze)
-		rc = device->discipline->freeze(device);
-
 	dasd_put_device(device);
 	return rc;
 }
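The stolen-lock checks added throughout this patch (dasd_start_IO, _dasd_sleep_on, dasd_sleep_on_immediatly, __dasd_block_start_head) all follow one pattern: reject a request with -EPERM once DASD_FLAG_LOCK_STOLEN is set, unless the request carries DASD_CQR_ALLOW_SLOCK. A reduced sketch of that gate, with plain stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model of the stolen-lock gate: a request is refused
     * when the device reservation was stolen, unless it is explicitly
     * allowed to run against a stolen lock (e.g. an unconditional
     * reserve request itself). */
    static bool reject_for_stolen_lock(bool lock_stolen, bool allow_slock)
    {
        return lock_stolen && !allow_slock;
    }

    int main(void)
    {
        printf("%d\n", reject_for_stolen_lock(true, false)); /* 1: -EPERM */
        printf("%d\n", reject_for_stolen_lock(true, true));  /* 0: allowed */
        return 0;
    }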
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 968c76cf7127..1654a24817be 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	opm = ccw_device_get_path_mask(device->cdev);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	//FIXME: start with get_opm ?
 	if (erp->lpm == 0)
-		erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
+		erp->lpm = device->path_data.opm &
+			~(erp->irb.esw.esw0.sublog.lpum);
 	else
 		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
 
@@ -270,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
 {
 	erp->function = dasd_3990_erp_action_1;
 	dasd_3990_erp_alternate_path(erp);
-	if (erp->status == DASD_CQR_FAILED) {
+	if (erp->status == DASD_CQR_FAILED &&
+	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 		erp->status = DASD_CQR_FILLED;
 		erp->retries = 10;
-		erp->lpm = LPM_ANYPATH;
+		erp->lpm = erp->startdev->path_data.opm;
 		erp->function = dasd_3990_erp_action_1_sec;
 	}
 	return erp;
@@ -1907,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
 static void
 dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 {
-
 	if (sense[25] & DASD_SENSE_BIT_3) {
 		dasd_3990_erp_alternate_path(erp);
 
-		if (erp->status == DASD_CQR_FAILED) {
+		if (erp->status == DASD_CQR_FAILED &&
+		    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 			/* reset the lpm and the status to be able to
 			 * try further actions. */
-
-			erp->lpm = 0;
+			erp->lpm = erp->startdev->path_data.opm;
 			erp->status = DASD_CQR_NEED_ERP;
 		}
 	}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 8d41f3ed38d7..cb6a67bc89ff 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -208,6 +208,8 @@ dasd_feature_list(char *str, char **endp)
 		features |= DASD_FEATURE_READONLY;
 	else if (len == 4 && !strncmp(str, "diag", 4))
 		features |= DASD_FEATURE_USEDIAG;
+	else if (len == 3 && !strncmp(str, "raw", 3))
+		features |= DASD_FEATURE_USERAW;
 	else if (len == 6 && !strncmp(str, "erplog", 6))
 		features |= DASD_FEATURE_ERPLOG;
 	else if (len == 8 && !strncmp(str, "failfast", 8))
@@ -639,6 +641,7 @@ dasd_put_device_wake(struct dasd_device *device)
 {
 	wake_up(&dasd_delete_wq);
 }
+EXPORT_SYMBOL_GPL(dasd_put_device_wake);
 
 /*
  * Return dasd_device structure associated with cdev.
@@ -856,7 +859,7 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
 	spin_lock(&dasd_devmap_lock);
 	/* Changing diag discipline flag is only allowed in offline state. */
 	rc = count;
-	if (!devmap->device) {
+	if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
 		if (val)
 			devmap->features |= DASD_FEATURE_USEDIAG;
 		else
@@ -869,6 +872,56 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
 
+/*
+ * use_raw controls whether the driver should give access to raw eckd data or
+ * operate in standard mode
+ */
+static ssize_t
+dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int use_raw;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
+	else
+		use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
+	return sprintf(buf, use_raw ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	ssize_t rc;
+	unsigned long val;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	/* Changing the raw track access flag is only allowed offline. */
+	rc = count;
+	if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
+		if (val)
+			devmap->features |= DASD_FEATURE_USERAW;
+		else
+			devmap->features &= ~DASD_FEATURE_USERAW;
+	} else
+		rc = -EPERM;
+	spin_unlock(&dasd_devmap_lock);
+	return rc;
+}
+
+static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
+		   dasd_use_raw_store);
+
 static ssize_t
 dasd_discipline_show(struct device *dev, struct device_attribute *attr,
 		     char *buf)
@@ -1126,6 +1179,103 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
 
+static ssize_t dasd_reservation_policy_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct dasd_devmap *devmap;
+	int rc = 0;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (IS_ERR(devmap)) {
+		rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+	} else {
+		spin_lock(&dasd_devmap_lock);
+		if (devmap->features & DASD_FEATURE_FAILONSLCK)
+			rc = snprintf(buf, PAGE_SIZE, "fail\n");
+		else
+			rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+		spin_unlock(&dasd_devmap_lock);
+	}
+	return rc;
+}
+
+static ssize_t dasd_reservation_policy_store(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int rc;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+	rc = 0;
+	spin_lock(&dasd_devmap_lock);
+	if (sysfs_streq("ignore", buf))
+		devmap->features &= ~DASD_FEATURE_FAILONSLCK;
+	else if (sysfs_streq("fail", buf))
+		devmap->features |= DASD_FEATURE_FAILONSLCK;
+	else
+		rc = -EINVAL;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	if (rc)
+		return rc;
+	else
+		return count;
+}
+
+static DEVICE_ATTR(reservation_policy, 0644,
+		   dasd_reservation_policy_show, dasd_reservation_policy_store);
+
+static ssize_t dasd_reservation_state_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct dasd_device *device;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "none\n");
+
+	if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
+		rc = snprintf(buf, PAGE_SIZE, "reserved\n");
+	else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
+		rc = snprintf(buf, PAGE_SIZE, "lost\n");
+	else
+		rc = snprintf(buf, PAGE_SIZE, "none\n");
+	dasd_put_device(device);
+	return rc;
+}
+
+static ssize_t dasd_reservation_state_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (sysfs_streq("reset", buf))
+		clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+	else
+		rc = -EINVAL;
+	dasd_put_device(device);
+
+	if (rc)
+		return rc;
+	else
+		return count;
+}
+
+static DEVICE_ATTR(last_known_reservation_state, 0644,
+		   dasd_reservation_state_show, dasd_reservation_state_store);
+
 static struct attribute * dasd_attrs[] = {
 	&dev_attr_readonly.attr,
 	&dev_attr_discipline.attr,
@@ -1134,10 +1284,13 @@ static struct attribute * dasd_attrs[] = {
 	&dev_attr_vendor.attr,
 	&dev_attr_uid.attr,
 	&dev_attr_use_diag.attr,
+	&dev_attr_raw_track_access.attr,
 	&dev_attr_eer_enabled.attr,
 	&dev_attr_erplog.attr,
 	&dev_attr_failfast.attr,
 	&dev_attr_expires.attr,
+	&dev_attr_reservation_policy.attr,
+	&dev_attr_last_known_reservation_state.attr,
 	NULL,
 };
 
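The "raw" keyword added to dasd_feature_list above follows the same length-plus-strncmp pattern as the existing keywords of the dasd= kernel parameter. A self-contained sketch of that parsing style (the feature values here are illustrative, not the kernel's DASD_FEATURE_* constants):

    #include <stdio.h>
    #include <string.h>

    #define FEAT_RO   0x01 /* illustrative values only */
    #define FEAT_DIAG 0x02
    #define FEAT_RAW  0x04

    static int parse_feature(const char *str, size_t len)
    {
        if (len == 2 && !strncmp(str, "ro", 2))
            return FEAT_RO;
        if (len == 4 && !strncmp(str, "diag", 4))
            return FEAT_DIAG;
        if (len == 3 && !strncmp(str, "raw", 3))
            return FEAT_RAW;
        return 0;
    }

    int main(void)
    {
        printf("0x%x\n", parse_feature("raw", 3)); /* prints 0x4 */
        return 0;
    }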
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 266b34b55403..29143eda9dd9 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -10,6 +10,7 @@
 
 #define KMSG_COMPONENT "dasd"
 
+#include <linux/kernel_stat.h>
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -238,6 +239,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
 	addr_t ip;
 	int rc;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
 	switch (ext_int_code >> 24) {
 	case DASD_DIAG_CODE_31BIT:
 		ip = (addr_t) param32;
@@ -617,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = {
 	.ebcname = "DIAG",
 	.max_blocks = DIAG_MAX_BLOCKS,
 	.check_device = dasd_diag_check_device,
+	.verify_path = dasd_generic_verify_path,
 	.fill_geometry = dasd_diag_fill_geometry,
 	.start_IO = dasd_start_diag,
 	.term_IO = dasd_diag_term_IO,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bf61274af3bb..318672d05563 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -54,6 +54,15 @@
 #define ECKD_F7(i)	(i->factor7)
 #define ECKD_F8(i)	(i->factor8)
 
+/*
+ * raw track access always maps to 64k in memory
+ * so it maps to 16 blocks of 4k per track
+ */
+#define DASD_RAW_BLOCK_PER_TRACK 16
+#define DASD_RAW_BLOCKSIZE 4096
+/* 64k are 128 x 512 byte sectors  */
+#define DASD_RAW_SECTORS_PER_TRACK 128
+
 MODULE_LICENSE("GPL");
 
 static struct dasd_discipline dasd_eckd_discipline;
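The three constants above are mutually consistent: 16 blocks of 4 KiB give the 64 KiB per-track image, which is also 128 sectors of 512 bytes. A quick standalone check:

    #include <assert.h>

    #define DASD_RAW_BLOCK_PER_TRACK 16
    #define DASD_RAW_BLOCKSIZE 4096
    #define DASD_RAW_SECTORS_PER_TRACK 128

    int main(void)
    {
        /* 16 blocks x 4 KiB = 64 KiB per track image */
        assert(DASD_RAW_BLOCK_PER_TRACK * DASD_RAW_BLOCKSIZE == 64 * 1024);
        /* 64 KiB = 128 x 512-byte sectors */
        assert(DASD_RAW_SECTORS_PER_TRACK * 512 == 64 * 1024);
        return 0;
    }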
@@ -90,6 +99,18 @@ static struct {
 } *dasd_reserve_req;
 static DEFINE_MUTEX(dasd_reserve_mutex);
 
+/* definitions for the path verification worker */
+struct path_verification_work_data {
+	struct work_struct worker;
+	struct dasd_device *device;
+	struct dasd_ccw_req cqr;
+	struct ccw1 ccw;
+	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
+	int isglobal;
+	__u8 tbvpm;
+};
+static struct path_verification_work_data *path_verification_worker;
+static DEFINE_MUTEX(dasd_path_verification_mutex);
 
 /* initial attempt at a probe function. this can be simplified once
  * the other detection code is gone */
@@ -373,6 +394,23 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
 		data->length = reclen;
 		data->operation.operation = 0x03;
 		break;
+	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+		data->operation.orientation = 0x0;
+		data->operation.operation = 0x3F;
+		data->extended_operation = 0x11;
+		data->length = 0;
+		data->extended_parameter_length = 0x02;
+		if (data->count > 8) {
+			data->extended_parameter[0] = 0xFF;
+			data->extended_parameter[1] = 0xFF;
+			data->extended_parameter[1] <<= (16 - count);
+		} else {
+			data->extended_parameter[0] = 0xFF;
+			data->extended_parameter[0] <<= (8 - count);
+			data->extended_parameter[1] = 0x00;
+		}
+		data->sector = 0xFF;
+		break;
 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
 		data->auxiliary.length_valid = 0x1;
 		data->length = reclen;	/* not tlf, as one might think */
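The extended_parameter bytes built above form a 16-bit bitmap whose top `count` bits select the records to be written. My reading of that computation, rendered standalone (a sketch, not authoritative):

    #include <stdio.h>

    static void record_mask(unsigned count, unsigned char p[2])
    {
        if (count > 8) {
            p[0] = 0xFF;
            p[1] = (unsigned char)(0xFF << (16 - count));
        } else {
            p[0] = (unsigned char)(0xFF << (8 - count));
            p[1] = 0x00;
        }
    }

    int main(void)
    {
        unsigned char p[2];
        record_mask(3, p);  /* 0xE0 0x00: top 3 bits set */
        printf("%02x %02x\n", p[0], p[1]);
        record_mask(10, p); /* 0xFF 0xC0: top 10 bits set */
        printf("%02x %02x\n", p[0], p[1]);
        return 0;
    }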
@@ -396,6 +434,12 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
 	case DASD_ECKD_CCW_READ_COUNT:
 		data->operation.operation = 0x06;
 		break;
+	case DASD_ECKD_CCW_READ_TRACK:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x0C;
+		data->extended_parameter_length = 0;
+		data->sector = 0xFF;
+		break;
 	case DASD_ECKD_CCW_READ_TRACK_DATA:
 		data->auxiliary.length_valid = 0x1;
 		data->length = tlf;
@@ -439,10 +483,16 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
 
 	ccw->cmd_code = DASD_ECKD_CCW_PFX;
 	ccw->flags = 0;
-	ccw->count = sizeof(*pfxdata);
-	ccw->cda = (__u32) __pa(pfxdata);
+	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
+		ccw->count = sizeof(*pfxdata) + 2;
+		ccw->cda = (__u32) __pa(pfxdata);
+		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
+	} else {
+		ccw->count = sizeof(*pfxdata);
+		ccw->cda = (__u32) __pa(pfxdata);
+		memset(pfxdata, 0, sizeof(*pfxdata));
+	}
 
-	memset(pfxdata, 0, sizeof(*pfxdata));
 	/* prefix data */
 	if (format > 1) {
 		DBF_DEV_EVENT(DBF_ERR, basedev,
@@ -476,6 +526,7 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
 		dedata->mask.perm = 0x1;
 		dedata->attributes.operation = basepriv->attrib.operation;
 		break;
+	case DASD_ECKD_CCW_READ_TRACK:
 	case DASD_ECKD_CCW_READ_TRACK_DATA:
 		dedata->mask.perm = 0x1;
 		dedata->attributes.operation = basepriv->attrib.operation;
@@ -502,6 +553,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
 		dedata->attributes.operation = DASD_BYPASS_CACHE;
 		rc = check_XRC_on_prefix(pfxdata, basedev);
 		break;
+	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+		dedata->mask.perm = 0x03;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = 0;
+		break;
 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
 		dedata->mask.perm = 0x02;
 		dedata->attributes.operation = basepriv->attrib.operation;
@@ -755,26 +811,27 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
 	return -EINVAL;
 }
 
-static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
-						    void *rcd_buffer,
-						    struct ciw *ciw, __u8 lpm)
+static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
+				   struct dasd_ccw_req *cqr,
+				   __u8 *rcd_buffer,
+				   __u8 lpm)
 {
-	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
-
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
-				   device);
-
-	if (IS_ERR(cqr)) {
-		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
-			      "Could not allocate RCD request");
-		return cqr;
-	}
+	/*
+	 * buffer has to start with EBCDIC "V1.0" to show
+	 * support for virtual device SNEQ
+	 */
+	rcd_buffer[0] = 0xE5;
+	rcd_buffer[1] = 0xF1;
+	rcd_buffer[2] = 0x4B;
+	rcd_buffer[3] = 0xF0;
 
 	ccw = cqr->cpaddr;
-	ccw->cmd_code = ciw->cmd;
+	ccw->cmd_code = DASD_ECKD_CCW_RCD;
+	ccw->flags = 0;
 	ccw->cda = (__u32)(addr_t)rcd_buffer;
-	ccw->count = ciw->count;
+	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
+	cqr->magic = DASD_ECKD_MAGIC;
 
 	cqr->startdev = device;
 	cqr->memdev = device;
@@ -784,7 +841,30 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
-	return cqr;
+	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+}
+
+static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
+					   struct dasd_ccw_req *cqr,
+					   __u8 *rcd_buffer,
+					   __u8 lpm)
+{
+	struct ciw *ciw;
+	int rc;
+	/*
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
+	 */
+	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
+		return -EOPNOTSUPP;
+
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+	cqr->retries = 5;
+	rc = dasd_sleep_on_immediatly(cqr);
+	return rc;
 }
 
 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
@@ -797,32 +877,29 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
 	struct dasd_ccw_req *cqr;
 
 	/*
-	 * scan for RCD command in extended SenseID data
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
 	 */
 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
-	if (!ciw || ciw->cmd == 0) {
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
 		ret = -EOPNOTSUPP;
 		goto out_error;
 	}
-	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
+	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
 	if (!rcd_buf) {
 		ret = -ENOMEM;
 		goto out_error;
 	}
-
-	/*
-	 * buffer has to start with EBCDIC "V1.0" to show
-	 * support for virtual device SNEQ
-	 */
-	rcd_buf[0] = 0xE5;
-	rcd_buf[1] = 0xF1;
-	rcd_buf[2] = 0x4B;
-	rcd_buf[3] = 0xF0;
-	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
+				   0, /* use rcd_buf as data area */
+				   device);
 	if (IS_ERR(cqr)) {
-		ret = PTR_ERR(cqr);
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Could not allocate RCD request");
+		ret = -ENOMEM;
 		goto out_error;
 	}
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
 	ret = dasd_sleep_on(cqr);
 	/*
 	 * on success we update the user input parms
@@ -831,7 +908,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
 	if (ret)
 		goto out_error;
 
-	*rcd_buffer_size = ciw->count;
+	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
 	*rcd_buffer = rcd_buf;
 	return 0;
 out_error:
@@ -901,18 +978,18 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 	void *conf_data;
 	int conf_len, conf_data_saved;
 	int rc;
-	__u8 lpm;
+	__u8 lpm, opm;
 	struct dasd_eckd_private *private;
-	struct dasd_eckd_path *path_data;
+	struct dasd_path *path_data;
 
 	private = (struct dasd_eckd_private *) device->private;
-	path_data = (struct dasd_eckd_path *) &private->path_data;
-	path_data->opm = ccw_device_get_path_mask(device->cdev);
+	path_data = &device->path_data;
+	opm = ccw_device_get_path_mask(device->cdev);
 	lpm = 0x80;
 	conf_data_saved = 0;
 	/* get configuration data per operational path */
 	for (lpm = 0x80; lpm; lpm>>= 1) {
-		if (lpm & path_data->opm){
+		if (lpm & opm) {
 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
 						     &conf_len, lpm);
 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
@@ -925,6 +1002,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
 						"No configuration data "
 						"retrieved");
+				/* no further analysis possible */
+				path_data->opm |= lpm;
 				continue;	/* no error */
 			}
 			/* save first valid configuration data */
@@ -948,6 +1027,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 				path_data->ppm |= lpm;
 				break;
 			}
+			path_data->opm |= lpm;
 			if (conf_data != private->conf_data)
 				kfree(conf_data);
 		}
@@ -955,6 +1035,140 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
955 return 0; 1035 return 0;
956} 1036}
957 1037
1038static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1039{
1040 struct dasd_eckd_private *private;
1041 int mdc;
1042 u32 fcx_max_data;
1043
1044 private = (struct dasd_eckd_private *) device->private;
1045 if (private->fcx_max_data) {
1046 mdc = ccw_device_get_mdc(device->cdev, lpm);
1047 if ((mdc < 0)) {
1048 dev_warn(&device->cdev->dev,
1049 "Detecting the maximum data size for zHPF "
1050 "requests failed (rc=%d) for a new path %x\n",
1051 mdc, lpm);
1052 return mdc;
1053 }
1054 fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
1055 if (fcx_max_data < private->fcx_max_data) {
1056 dev_warn(&device->cdev->dev,
1057 "The maximum data size for zHPF requests %u "
1058 "on a new path %x is below the active maximum "
1059 "%u\n", fcx_max_data, lpm,
1060 private->fcx_max_data);
1061 return -EACCES;
1062 }
1063 }
1064 return 0;
1065}
1066
1067static void do_path_verification_work(struct work_struct *work)
1068{
1069 struct path_verification_work_data *data;
1070 struct dasd_device *device;
1071 __u8 lpm, opm, npm, ppm, epm;
1072 unsigned long flags;
1073 int rc;
1074
1075 data = container_of(work, struct path_verification_work_data, worker);
1076 device = data->device;
1077
1078 opm = 0;
1079 npm = 0;
1080 ppm = 0;
1081 epm = 0;
1082 for (lpm = 0x80; lpm; lpm >>= 1) {
1083 if (lpm & data->tbvpm) {
1084 memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1085 memset(&data->cqr, 0, sizeof(data->cqr));
1086 data->cqr.cpaddr = &data->ccw;
1087 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1088 data->rcd_buffer,
1089 lpm);
1090 if (!rc) {
1091 switch (dasd_eckd_path_access(data->rcd_buffer,
1092 DASD_ECKD_RCD_DATA_SIZE)) {
1093 case 0x02:
1094 npm |= lpm;
1095 break;
1096 case 0x03:
1097 ppm |= lpm;
1098 break;
1099 }
1100 opm |= lpm;
1101 } else if (rc == -EOPNOTSUPP) {
1102 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1103 "path verification: No configuration "
1104 "data retrieved");
1105 opm |= lpm;
1106 } else if (rc == -EAGAIN) {
1107 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1108 "path verification: device is stopped,"
1109 " try again later");
1110 epm |= lpm;
1111 } else {
1112 dev_warn(&device->cdev->dev,
1113 "Reading device feature codes failed "
1114 "(rc=%d) for new path %x\n", rc, lpm);
1115 continue;
1116 }
1117 if (verify_fcx_max_data(device, lpm)) {
1118 opm &= ~lpm;
1119 npm &= ~lpm;
1120 ppm &= ~lpm;
1121 }
1122 }
1123 }
1124 /*
1125 * There is a small chance that a path is lost again between
1126 * the above path verification and the following modification of
1127 * the device opm mask. We could avoid that race here by using
1128 * yet another path mask, but we'd rather deal with this unlikely
1129 * situation in dasd_start_IO.
1130 */
1131 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1132 if (!device->path_data.opm && opm) {
1133 device->path_data.opm = opm;
1134 dasd_generic_path_operational(device);
1135 } else
1136 device->path_data.opm |= opm;
1137 device->path_data.npm |= npm;
1138 device->path_data.ppm |= ppm;
1139 device->path_data.tbvpm |= epm;
1140 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1141
1142 dasd_put_device(device);
1143 if (data->isglobal)
1144 mutex_unlock(&dasd_path_verification_mutex);
1145 else
1146 kfree(data);
1147}
1148
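Aside, not part of the patch: opm, npm, ppm, epm and tbvpm are 8-bit channel path masks, one bit per path, with the most significant bit standing for the first path. A sketch of the iteration idiom used throughout this function, with a hypothetical mask:

    unsigned char tbvpm = 0x90;           /* hypothetical: paths 0 and 3 */
    unsigned char lpm;
    for (lpm = 0x80; lpm; lpm >>= 1)
            if (lpm & tbvpm)
                    ;                     /* verify this one path */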
1149static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
1150{
1151 struct path_verification_work_data *data;
1152
1153 data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1154 if (!data) {
1155 if (mutex_trylock(&dasd_path_verification_mutex)) {
1156 data = path_verification_worker;
1157 data->isglobal = 1;
1158 } else
1159 return -ENOMEM;
1160 } else {
1161 memset(data, 0, sizeof(*data));
1162 data->isglobal = 0;
1163 }
1164 INIT_WORK(&data->worker, do_path_verification_work);
1165 dasd_get_device(device);
1166 data->device = device;
1167 data->tbvpm = lpm;
1168 schedule_work(&data->worker);
1169 return 0;
1170}
1171
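Aside, not part of the patch: the allocation strategy above is a common kernel fallback pattern. The function may be called from contexts that cannot sleep, hence GFP_ATOMIC; if even that fails, the single statically allocated worker (path_verification_worker, guarded by dasd_path_verification_mutex) is used instead of failing outright. A generic sketch with hypothetical names:

    data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
    if (!data) {
            if (!mutex_trylock(&fallback_mutex))  /* static buffer busy */
                    return -ENOMEM;
            data = &static_fallback;              /* unlocked by the worker */
    }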
958static int dasd_eckd_read_features(struct dasd_device *device) 1172static int dasd_eckd_read_features(struct dasd_device *device)
959{ 1173{
960 struct dasd_psf_prssd_data *prssdp; 1174 struct dasd_psf_prssd_data *prssdp;
@@ -1105,6 +1319,37 @@ static void dasd_eckd_validate_server(struct dasd_device *device)
1105 "returned rc=%d", private->uid.ssid, rc); 1319 "returned rc=%d", private->uid.ssid, rc);
1106} 1320}
1107 1321
1322static u32 get_fcx_max_data(struct dasd_device *device)
1323{
1324#if defined(CONFIG_64BIT)
1325 int tpm, mdc;
1326 int fcx_in_css, fcx_in_gneq, fcx_in_features;
1327 struct dasd_eckd_private *private;
1328
1329 if (dasd_nofcx)
1330 return 0;
1331 /* is transport mode supported? */
1332 private = (struct dasd_eckd_private *) device->private;
1333 fcx_in_css = css_general_characteristics.fcx;
1334 fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1335 fcx_in_features = private->features.feature[40] & 0x80;
1336 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1337
1338 if (!tpm)
1339 return 0;
1340
1341 mdc = ccw_device_get_mdc(device->cdev, 0);
1342 if (mdc < 0) {
1343 dev_warn(&device->cdev->dev, "Detecting the maximum supported"
1344 " data size for zHPF requests failed\n");
1345 return 0;
1346 } else
1347 return mdc * FCX_MAX_DATA_FACTOR;
1348#else
1349 return 0;
1350#endif
1351}
1352
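Aside, not part of the patch: transport mode (zHPF) is used only if the channel subsystem, the control unit and the device all advertise it, hence the three-way AND above. The same feature byte is consulted again in dasd_eckd_build_cp below for multitrack support. Worked example with a hypothetical feature byte:

    unsigned char f40 = 0xa0;             /* hypothetical feature[40] */
    int fcx_in_features = f40 & 0x80;     /* non-zero: transport mode */
    int fcx_multitrack = f40 & 0x20;      /* non-zero: multitrack allowed */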
1108/* 1353/*
1109 * Check device characteristics. 1354 * Check device characteristics.
1110 * If the device is accessible using ECKD discipline, the device is enabled. 1355 * If the device is accessible using ECKD discipline, the device is enabled.
@@ -1223,6 +1468,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1223 else 1468 else
1224 private->real_cyl = private->rdc_data.no_cyl; 1469 private->real_cyl = private->rdc_data.no_cyl;
1225 1470
1471 private->fcx_max_data = get_fcx_max_data(device);
1472
1226 readonly = dasd_device_is_ro(device); 1473 readonly = dasd_device_is_ro(device);
1227 if (readonly) 1474 if (readonly)
1228 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 1475 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -1404,6 +1651,13 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1404 dasd_sfree_request(init_cqr, device); 1651 dasd_sfree_request(init_cqr, device);
1405 } 1652 }
1406 1653
1654 if (device->features & DASD_FEATURE_USERAW) {
1655 block->bp_block = DASD_RAW_BLOCKSIZE;
1656 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
1657 block->s2b_shift = 3;
1658 goto raw;
1659 }
1660
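Aside, inferred from the constants used in this patch rather than stated by it: s2b_shift = 3 means eight 512-byte sectors per block, i.e. 4 KiB raw blocks, and with DASD_RAW_BLOCK_PER_TRACK blocks per track (assumed to be 16 here) each track maps to exactly 64 KiB, which is what the alignment rules in dasd_raw_build_cp below rely on:

    unsigned int bp_block = 512u << 3;        /* 4096-byte raw blocks */
    unsigned int trk_bytes = 16 * bp_block;   /* 65536 bytes per track */
    unsigned int trk_sectors = trk_bytes / 512;   /* 128, cf. DASD_RAW_SECTORS_PER_TRACK */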
1407 if (status == INIT_CQR_UNFORMATTED) { 1661 if (status == INIT_CQR_UNFORMATTED) {
1408 dev_warn(&device->cdev->dev, "The DASD is not formatted\n"); 1662 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1409 return -EMEDIUMTYPE; 1663 return -EMEDIUMTYPE;
@@ -1441,6 +1695,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1441 dev_warn(&device->cdev->dev, 1695 dev_warn(&device->cdev->dev,
1442 "Track 0 has no records following the VTOC\n"); 1696 "Track 0 has no records following the VTOC\n");
1443 } 1697 }
1698
1444 if (count_area != NULL && count_area->kl == 0) { 1699 if (count_area != NULL && count_area->kl == 0) {
1445 /* we found nothing violating our disk layout */ 1700 /* we found nothing violating our disk layout */
1446 if (dasd_check_blocksize(count_area->dl) == 0) 1701 if (dasd_check_blocksize(count_area->dl) == 0)
@@ -1456,6 +1711,8 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1456 block->s2b_shift++; 1711 block->s2b_shift++;
1457 1712
1458 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 1713 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1714
1715raw:
1459 block->blocks = (private->real_cyl * 1716 block->blocks = (private->real_cyl *
1460 private->rdc_data.trk_per_cyl * 1717 private->rdc_data.trk_per_cyl *
1461 blk_per_trk); 1718 blk_per_trk);
@@ -1716,6 +1973,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1716 if (cqr->block && (cqr->startdev != cqr->block->base)) { 1973 if (cqr->block && (cqr->startdev != cqr->block->base)) {
1717 dasd_eckd_reset_ccw_to_base_io(cqr); 1974 dasd_eckd_reset_ccw_to_base_io(cqr);
1718 cqr->startdev = cqr->block->base; 1975 cqr->startdev = cqr->block->base;
1976 cqr->lpm = cqr->block->base->path_data.opm;
1719 } 1977 }
1720}; 1978};
1721 1979
@@ -1744,9 +2002,9 @@ dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1744 return dasd_default_erp_postaction; 2002 return dasd_default_erp_postaction;
1745} 2003}
1746 2004
1747 2005static void dasd_eckd_check_for_device_change(struct dasd_device *device,
1748static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, 2006 struct dasd_ccw_req *cqr,
1749 struct irb *irb) 2007 struct irb *irb)
1750{ 2008{
1751 char mask; 2009 char mask;
1752 char *sense = NULL; 2010 char *sense = NULL;
@@ -1770,40 +2028,41 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1770 /* schedule worker to reload device */ 2028 /* schedule worker to reload device */
1771 dasd_reload_device(device); 2029 dasd_reload_device(device);
1772 } 2030 }
1773
1774 dasd_generic_handle_state_change(device); 2031 dasd_generic_handle_state_change(device);
1775 return; 2032 return;
1776 } 2033 }
1777 2034
1778 /* summary unit check */
1779 sense = dasd_get_sense(irb); 2035 sense = dasd_get_sense(irb);
1780 if (sense && (sense[7] == 0x0D) && 2036 if (!sense)
2037 return;
2038
2039 /* summary unit check */
2040 if ((sense[7] == 0x0D) &&
1781 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 2041 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
1782 dasd_alias_handle_summary_unit_check(device, irb); 2042 dasd_alias_handle_summary_unit_check(device, irb);
1783 return; 2043 return;
1784 } 2044 }
1785 2045
1786 /* service information message SIM */ 2046 /* service information message SIM */
1787 if (sense && !(sense[27] & DASD_SENSE_BIT_0) && 2047 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
1788 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 2048 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1789 dasd_3990_erp_handle_sim(device, sense); 2049 dasd_3990_erp_handle_sim(device, sense);
1790 dasd_schedule_device_bh(device);
1791 return; 2050 return;
1792 } 2051 }
1793 2052
1794 if ((scsw_cc(&irb->scsw) == 1) && !sense && 2053 /* loss of device reservation is handled via base devices only
1795 (scsw_fctl(&irb->scsw) == SCSW_FCTL_START_FUNC) && 2054 * as alias devices may be used with several bases
1796 (scsw_actl(&irb->scsw) == SCSW_ACTL_START_PEND) && 2055 */
1797 (scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND)) { 2056 if (device->block && (sense[7] == 0x3F) &&
1798 /* fake irb do nothing, they are handled elsewhere */ 2057 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
1799 dasd_schedule_device_bh(device); 2058 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
1800 return; 2059 if (device->features & DASD_FEATURE_FAILONSLCK)
2060 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
2061 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2062 dev_err(&device->cdev->dev,
2063 "The device reservation was lost\n");
1801 } 2064 }
1802 2065}
1803 dasd_schedule_device_bh(device);
1804 return;
1805};
1806
1807 2066
1808static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 2067static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1809 struct dasd_device *startdev, 2068 struct dasd_device *startdev,
@@ -1984,7 +2243,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1984 cqr->memdev = startdev; 2243 cqr->memdev = startdev;
1985 cqr->block = block; 2244 cqr->block = block;
1986 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2245 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
1987 cqr->lpm = private->path_data.ppm; 2246 cqr->lpm = startdev->path_data.ppm;
1988 cqr->retries = 256; 2247 cqr->retries = 256;
1989 cqr->buildclk = get_clock(); 2248 cqr->buildclk = get_clock();
1990 cqr->status = DASD_CQR_FILLED; 2249 cqr->status = DASD_CQR_FILLED;
@@ -2161,7 +2420,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2161 cqr->memdev = startdev; 2420 cqr->memdev = startdev;
2162 cqr->block = block; 2421 cqr->block = block;
2163 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2422 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2164 cqr->lpm = private->path_data.ppm; 2423 cqr->lpm = startdev->path_data.ppm;
2165 cqr->retries = 256; 2424 cqr->retries = 256;
2166 cqr->buildclk = get_clock(); 2425 cqr->buildclk = get_clock();
2167 cqr->status = DASD_CQR_FILLED; 2426 cqr->status = DASD_CQR_FILLED;
@@ -2326,6 +2585,12 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2326 struct tidaw *last_tidaw = NULL; 2585 struct tidaw *last_tidaw = NULL;
2327 int itcw_op; 2586 int itcw_op;
2328 size_t itcw_size; 2587 size_t itcw_size;
2588 u8 tidaw_flags;
2589 unsigned int seg_len, part_len, len_to_track_end;
2590 unsigned char new_track;
2591 sector_t recid, trkid;
2592 unsigned int offs;
2593 unsigned int count, count_to_trk_end;
2329 2594
2330 basedev = block->base; 2595 basedev = block->base;
2331 private = (struct dasd_eckd_private *) basedev->private; 2596 private = (struct dasd_eckd_private *) basedev->private;
@@ -2341,12 +2606,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2341 /* track-based I/O needs to address all memory via TIDAWs, 2606 /* track-based I/O needs to address all memory via TIDAWs,
2342 * not just for 64 bit addresses. This allows us to map 2607 * not just for 64 bit addresses. This allows us to map
2343 * each segment directly to one tidaw. 2608 * each segment directly to one tidaw.
2609 * In the case of write requests, additional tidaws may
2610 * be needed when a segment crosses a track boundary.
2344 */ 2611 */
2345 trkcount = last_trk - first_trk + 1; 2612 trkcount = last_trk - first_trk + 1;
2346 ctidaw = 0; 2613 ctidaw = 0;
2347 rq_for_each_segment(bv, req, iter) { 2614 rq_for_each_segment(bv, req, iter) {
2348 ++ctidaw; 2615 ++ctidaw;
2349 } 2616 }
2617 if (rq_data_dir(req) == WRITE)
2618 ctidaw += (last_trk - first_trk);
2350 2619
2351 /* Allocate the ccw request. */ 2620 /* Allocate the ccw request. */
2352 itcw_size = itcw_calc_size(0, ctidaw, 0); 2621 itcw_size = itcw_calc_size(0, ctidaw, 0);
@@ -2354,15 +2623,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2354 if (IS_ERR(cqr)) 2623 if (IS_ERR(cqr))
2355 return cqr; 2624 return cqr;
2356 2625
2357 cqr->cpmode = 1;
2358 cqr->startdev = startdev;
2359 cqr->memdev = startdev;
2360 cqr->block = block;
2361 cqr->expires = 100*HZ;
2362 cqr->buildclk = get_clock();
2363 cqr->status = DASD_CQR_FILLED;
2364 cqr->retries = 10;
2365
2366 /* transfer length factor: how many bytes to read from the last track */ 2626 /* transfer length factor: how many bytes to read from the last track */
2367 if (first_trk == last_trk) 2627 if (first_trk == last_trk)
2368 tlf = last_offs - first_offs + 1; 2628 tlf = last_offs - first_offs + 1;
@@ -2371,8 +2631,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2371 tlf *= blksize; 2631 tlf *= blksize;
2372 2632
2373 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 2633 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2634 if (IS_ERR(itcw)) {
2635 dasd_sfree_request(cqr, startdev);
2636 return ERR_PTR(-EINVAL);
2637 }
2374 cqr->cpaddr = itcw_get_tcw(itcw); 2638 cqr->cpaddr = itcw_get_tcw(itcw);
2375
2376 if (prepare_itcw(itcw, first_trk, last_trk, 2639 if (prepare_itcw(itcw, first_trk, last_trk,
2377 cmd, basedev, startdev, 2640 cmd, basedev, startdev,
2378 first_offs + 1, 2641 first_offs + 1,
@@ -2385,31 +2648,69 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2385 dasd_sfree_request(cqr, startdev); 2648 dasd_sfree_request(cqr, startdev);
2386 return ERR_PTR(-EAGAIN); 2649 return ERR_PTR(-EAGAIN);
2387 } 2650 }
2388
2389 /* 2651 /*
2390 * A tidaw can address 4k of memory, but must not cross page boundaries. 2652 * A tidaw can address 4k of memory, but must not cross page boundaries.
2391 * We can let the block layer handle this by setting 2653 * We can let the block layer handle this by setting
2392 * blk_queue_segment_boundary to page boundaries and 2654 * blk_queue_segment_boundary to page boundaries and
2393 * blk_max_segment_size to page size when setting up the request queue. 2655 * blk_max_segment_size to page size when setting up the request queue.
2656 * For write requests, a TIDAW must not cross track boundaries, because
2657 * we have to set the CBC flag on the last tidaw for each track.
2394 */ 2658 */
2395 rq_for_each_segment(bv, req, iter) { 2659 if (rq_data_dir(req) == WRITE) {
2396 dst = page_address(bv->bv_page) + bv->bv_offset; 2660 new_track = 1;
2397 last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len); 2661 recid = first_rec;
2398 if (IS_ERR(last_tidaw)) 2662 rq_for_each_segment(bv, req, iter) {
2399 return (struct dasd_ccw_req *)last_tidaw; 2663 dst = page_address(bv->bv_page) + bv->bv_offset;
2664 seg_len = bv->bv_len;
2665 while (seg_len) {
2666 if (new_track) {
2667 trkid = recid;
2668 offs = sector_div(trkid, blk_per_trk);
2669 count_to_trk_end = blk_per_trk - offs;
2670 count = min((last_rec - recid + 1),
2671 (sector_t)count_to_trk_end);
2672 len_to_track_end = count * blksize;
2673 recid += count;
2674 new_track = 0;
2675 }
2676 part_len = min(seg_len, len_to_track_end);
2677 seg_len -= part_len;
2678 len_to_track_end -= part_len;
2679 /* We need to end the tidaw at track end */
2680 if (!len_to_track_end) {
2681 new_track = 1;
2682 tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
2683 } else
2684 tidaw_flags = 0;
2685 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
2686 dst, part_len);
2687 if (IS_ERR(last_tidaw))
2688 return ERR_PTR(-EINVAL);
2689 dst += part_len;
2690 }
2691 }
2692 } else {
2693 rq_for_each_segment(bv, req, iter) {
2694 dst = page_address(bv->bv_page) + bv->bv_offset;
2695 last_tidaw = itcw_add_tidaw(itcw, 0x00,
2696 dst, bv->bv_len);
2697 if (IS_ERR(last_tidaw))
2698 return ERR_PTR(-EINVAL);
2699 }
2400 } 2700 }
2401 2701 last_tidaw->flags |= TIDAW_FLAGS_LAST;
2402 last_tidaw->flags |= 0x80; 2702 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
2403 itcw_finalize(itcw); 2703 itcw_finalize(itcw);
2404 2704
2405 if (blk_noretry_request(req) || 2705 if (blk_noretry_request(req) ||
2406 block->base->features & DASD_FEATURE_FAILFAST) 2706 block->base->features & DASD_FEATURE_FAILFAST)
2407 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2707 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2708 cqr->cpmode = 1;
2408 cqr->startdev = startdev; 2709 cqr->startdev = startdev;
2409 cqr->memdev = startdev; 2710 cqr->memdev = startdev;
2410 cqr->block = block; 2711 cqr->block = block;
2411 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2712 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2412 cqr->lpm = private->path_data.ppm; 2713 cqr->lpm = startdev->path_data.ppm;
2413 cqr->retries = 256; 2714 cqr->retries = 256;
2414 cqr->buildclk = get_clock(); 2715 cqr->buildclk = get_clock();
2415 cqr->status = DASD_CQR_FILLED; 2716 cqr->status = DASD_CQR_FILLED;
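Aside, not part of the patch, a worked example of the write-path split above with hypothetical geometry (4 KiB blocks, 12 blocks per track, ignoring the last_rec clamp) and a segment whose first record is block 10 of track 1:

    unsigned int blksize = 4096, blk_per_trk = 12;
    sector_t recid = 22, trkid = recid;                     /* block 10 of track 1 */
    unsigned int offs = sector_div(trkid, blk_per_trk);     /* offs = 10, trkid = 1 */
    unsigned int count_to_trk_end = blk_per_trk - offs;     /* 2 blocks left */
    unsigned int len_to_track_end = count_to_trk_end * blksize;  /* 8192 bytes */
    /* a 16 KiB segment becomes two tidaws of 8192 bytes; the first one ends
     * the track and therefore carries TIDAW_FLAGS_INSERT_CBC */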
@@ -2420,11 +2721,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2420 struct dasd_block *block, 2721 struct dasd_block *block,
2421 struct request *req) 2722 struct request *req)
2422{ 2723{
2423 int tpm, cmdrtd, cmdwtd; 2724 int cmdrtd, cmdwtd;
2424 int use_prefix; 2725 int use_prefix;
2425#if defined(CONFIG_64BIT) 2726 int fcx_multitrack;
2426 int fcx_in_css, fcx_in_gneq, fcx_in_features;
2427#endif
2428 struct dasd_eckd_private *private; 2727 struct dasd_eckd_private *private;
2429 struct dasd_device *basedev; 2728 struct dasd_device *basedev;
2430 sector_t first_rec, last_rec; 2729 sector_t first_rec, last_rec;
@@ -2432,6 +2731,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2432 unsigned int first_offs, last_offs; 2731 unsigned int first_offs, last_offs;
2433 unsigned int blk_per_trk, blksize; 2732 unsigned int blk_per_trk, blksize;
2434 int cdlspecial; 2733 int cdlspecial;
2734 unsigned int data_size;
2435 struct dasd_ccw_req *cqr; 2735 struct dasd_ccw_req *cqr;
2436 2736
2437 basedev = block->base; 2737 basedev = block->base;
@@ -2450,15 +2750,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2450 last_offs = sector_div(last_trk, blk_per_trk); 2750 last_offs = sector_div(last_trk, blk_per_trk);
2451 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2751 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2452 2752
2453 /* is transport mode supported? */ 2753 fcx_multitrack = private->features.feature[40] & 0x20;
2454#if defined(CONFIG_64BIT) 2754 data_size = blk_rq_bytes(req);
2455 fcx_in_css = css_general_characteristics.fcx; 2755 /* tpm write requests add CBC data on each track boundary */
2456 fcx_in_gneq = private->gneq->reserved2[7] & 0x04; 2756 if (rq_data_dir(req) == WRITE)
2457 fcx_in_features = private->features.feature[40] & 0x80; 2757 data_size += (last_trk - first_trk) * 4;
2458 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2459#else
2460 tpm = 0;
2461#endif
2462 2758
2463 /* is read track data and write track data in command mode supported? */ 2759 /* is read track data and write track data in command mode supported? */
2464 cmdrtd = private->features.feature[9] & 0x20; 2760 cmdrtd = private->features.feature[9] & 0x20;
@@ -2468,13 +2764,15 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2468 cqr = NULL; 2764 cqr = NULL;
2469 if (cdlspecial || dasd_page_cache) { 2765 if (cdlspecial || dasd_page_cache) {
2470 /* do nothing, just fall through to the cmd mode single case */ 2766 /* do nothing, just fall through to the cmd mode single case */
2471 } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) { 2767 } else if ((data_size <= private->fcx_max_data)
2768 && (fcx_multitrack || (first_trk == last_trk))) {
2472 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 2769 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2473 first_rec, last_rec, 2770 first_rec, last_rec,
2474 first_trk, last_trk, 2771 first_trk, last_trk,
2475 first_offs, last_offs, 2772 first_offs, last_offs,
2476 blk_per_trk, blksize); 2773 blk_per_trk, blksize);
2477 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2774 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
2775 (PTR_ERR(cqr) != -ENOMEM))
2478 cqr = NULL; 2776 cqr = NULL;
2479 } else if (use_prefix && 2777 } else if (use_prefix &&
2480 (((rq_data_dir(req) == READ) && cmdrtd) || 2778 (((rq_data_dir(req) == READ) && cmdrtd) ||
@@ -2484,7 +2782,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2484 first_trk, last_trk, 2782 first_trk, last_trk,
2485 first_offs, last_offs, 2783 first_offs, last_offs,
2486 blk_per_trk, blksize); 2784 blk_per_trk, blksize);
2487 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2785 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
2786 (PTR_ERR(cqr) != -ENOMEM))
2488 cqr = NULL; 2787 cqr = NULL;
2489 } 2788 }
2490 if (!cqr) 2789 if (!cqr)
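Aside, not part of the patch: per the comment above, a transport mode write inserts 4 bytes of CBC data at every crossed track boundary, so the payload is padded before being compared with fcx_max_data. Hypothetical numbers:

    unsigned int data_size = 196608;      /* blk_rq_bytes(req), 48 blocks */
    data_size += (13 - 10) * 4;           /* write on tracks 10..13: 12 CBC bytes */
    /* transport mode is chosen only if data_size <= private->fcx_max_data */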
@@ -2496,6 +2795,135 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2496 return cqr; 2795 return cqr;
2497} 2796}
2498 2797
2798static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
2799 struct dasd_block *block,
2800 struct request *req)
2801{
2802 struct dasd_eckd_private *private;
2803 unsigned long *idaws;
2804 struct dasd_device *basedev;
2805 struct dasd_ccw_req *cqr;
2806 struct ccw1 *ccw;
2807 struct req_iterator iter;
2808 struct bio_vec *bv;
2809 char *dst;
2810 unsigned char cmd;
2811 unsigned int trkcount;
2812 unsigned int seg_len, len_to_track_end;
2813 unsigned int first_offs;
2814 unsigned int cidaw, cplength, datasize;
2815 sector_t first_trk, last_trk;
2816 unsigned int pfx_datasize;
2817
2818 /*
2819 * raw track access needs to be a multiple of 64k and on a 64k boundary
2820 */
2821 if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) {
2822 cqr = ERR_PTR(-EINVAL);
2823 goto out;
2824 }
2825 if (((blk_rq_pos(req) + blk_rq_sectors(req)) %
2826 DASD_RAW_SECTORS_PER_TRACK) != 0) {
2827 cqr = ERR_PTR(-EINVAL);
2828 goto out;
2829 }
2830
2831 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
2832 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
2833 DASD_RAW_SECTORS_PER_TRACK;
2834 trkcount = last_trk - first_trk + 1;
2835 first_offs = 0;
2836 basedev = block->base;
2837 private = (struct dasd_eckd_private *) basedev->private;
2838
2839 if (rq_data_dir(req) == READ)
2840 cmd = DASD_ECKD_CCW_READ_TRACK;
2841 else if (rq_data_dir(req) == WRITE)
2842 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
2843 else {
2844 cqr = ERR_PTR(-EINVAL);
2845 goto out;
2846 }
2847
2848 /*
2849 * Raw track based I/O needs IDAWs for each page,
2850 * and not just for 64 bit addresses.
2851 */
2852 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
2853
2854 /* 1x prefix + one read/write ccw per track */
2855 cplength = 1 + trkcount;
2856
2857 /*
2858 * struct PFX_eckd_data has up to 2 bytes of extended parameter;
2859 * this is needed for write full track and has to be accounted for
2860 * separately;
2861 * add 8 instead of 2 to keep the 8 byte boundary
2862 */
2863 pfx_datasize = sizeof(struct PFX_eckd_data) + 8;
2864
2865 datasize = pfx_datasize + cidaw * sizeof(unsigned long long);
2866
2867 /* Allocate the ccw request. */
2868 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
2869 datasize, startdev);
2870 if (IS_ERR(cqr))
2871 goto out;
2872 ccw = cqr->cpaddr;
2873
2874 if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd,
2875 basedev, startdev, 1 /* format */, first_offs + 1,
2876 trkcount, 0, 0) == -EAGAIN) {
2877 /* Clock not in sync and XRC is enabled.
2878 * Try again later.
2879 */
2880 dasd_sfree_request(cqr, startdev);
2881 cqr = ERR_PTR(-EAGAIN);
2882 goto out;
2883 }
2884
2885 idaws = (unsigned long *)(cqr->data + pfx_datasize);
2886
2887 len_to_track_end = 0;
2888
2889 rq_for_each_segment(bv, req, iter) {
2890 dst = page_address(bv->bv_page) + bv->bv_offset;
2891 seg_len = bv->bv_len;
2892 if (!len_to_track_end) {
2893 ccw[-1].flags |= CCW_FLAG_CC;
2894 ccw->cmd_code = cmd;
2895 /* maximum 3390 track size */
2896 ccw->count = 57326;
2897 /* 64k maps to one track */
2898 len_to_track_end = 65536;
2899 ccw->cda = (__u32)(addr_t)idaws;
2900 ccw->flags |= CCW_FLAG_IDA;
2901 ccw->flags |= CCW_FLAG_SLI;
2902 ccw++;
2903 }
2904 len_to_track_end -= seg_len;
2905 idaws = idal_create_words(idaws, dst, seg_len);
2906 }
2907
2908 if (blk_noretry_request(req) ||
2909 block->base->features & DASD_FEATURE_FAILFAST)
2910 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2911 cqr->startdev = startdev;
2912 cqr->memdev = startdev;
2913 cqr->block = block;
2914 cqr->expires = startdev->default_expires * HZ;
2915 cqr->lpm = startdev->path_data.ppm;
2916 cqr->retries = 256;
2917 cqr->buildclk = get_clock();
2918 cqr->status = DASD_CQR_FILLED;
2919
2920 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2921 cqr = NULL;
2922out:
2923 return cqr;
2924}
2925
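Aside, not part of the patch, a quick check of the alignment rules at the top of dasd_raw_build_cp with hypothetical request numbers and DASD_RAW_SECTORS_PER_TRACK taken as 128:

    sector_t pos = 256, nr = 384;             /* start and length in sectors */
    int aligned = (pos % 128 == 0) && ((pos + nr) % 128 == 0);  /* 1: accepted */
    sector_t first_trk = pos / 128;           /* 2 */
    sector_t last_trk = (pos + nr - 1) / 128; /* 4, so trkcount = 3 */
    /* a request starting at sector 300 would fail the first test with -EINVAL */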
2926
2499static int 2927static int
2500dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 2928dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2501{ 2929{
@@ -2600,7 +3028,10 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2600 3028
2601 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 3029 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2602 private->count++; 3030 private->count++;
2603 cqr = dasd_eckd_build_cp(startdev, block, req); 3031 if ((base->features & DASD_FEATURE_USERAW))
3032 cqr = dasd_raw_build_cp(startdev, block, req);
3033 else
3034 cqr = dasd_eckd_build_cp(startdev, block, req);
2604 if (IS_ERR(cqr)) 3035 if (IS_ERR(cqr))
2605 private->count--; 3036 private->count--;
2606 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 3037 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
@@ -2688,6 +3119,8 @@ dasd_eckd_release(struct dasd_device *device)
2688 cqr->status = DASD_CQR_FILLED; 3119 cqr->status = DASD_CQR_FILLED;
2689 3120
2690 rc = dasd_sleep_on_immediatly(cqr); 3121 rc = dasd_sleep_on_immediatly(cqr);
3122 if (!rc)
3123 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2691 3124
2692 if (useglobal) 3125 if (useglobal)
2693 mutex_unlock(&dasd_reserve_mutex); 3126 mutex_unlock(&dasd_reserve_mutex);
@@ -2741,6 +3174,8 @@ dasd_eckd_reserve(struct dasd_device *device)
2741 cqr->status = DASD_CQR_FILLED; 3174 cqr->status = DASD_CQR_FILLED;
2742 3175
2743 rc = dasd_sleep_on_immediatly(cqr); 3176 rc = dasd_sleep_on_immediatly(cqr);
3177 if (!rc)
3178 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2744 3179
2745 if (useglobal) 3180 if (useglobal)
2746 mutex_unlock(&dasd_reserve_mutex); 3181 mutex_unlock(&dasd_reserve_mutex);
@@ -2793,6 +3228,8 @@ dasd_eckd_steal_lock(struct dasd_device *device)
2793 cqr->status = DASD_CQR_FILLED; 3228 cqr->status = DASD_CQR_FILLED;
2794 3229
2795 rc = dasd_sleep_on_immediatly(cqr); 3230 rc = dasd_sleep_on_immediatly(cqr);
3231 if (!rc)
3232 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
2796 3233
2797 if (useglobal) 3234 if (useglobal)
2798 mutex_unlock(&dasd_reserve_mutex); 3235 mutex_unlock(&dasd_reserve_mutex);
@@ -2845,6 +3282,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
2845 cqr->memdev = device; 3282 cqr->memdev = device;
2846 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 3283 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2847 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3284 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3285 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
2848 cqr->retries = 5; 3286 cqr->retries = 5;
2849 cqr->expires = 10 * HZ; 3287 cqr->expires = 10 * HZ;
2850 cqr->buildclk = get_clock(); 3288 cqr->buildclk = get_clock();
@@ -3279,10 +3717,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3279{ 3717{
3280 char *page; 3718 char *page;
3281 int len, sl, sct, residual; 3719 int len, sl, sct, residual;
3282
3283 struct tsb *tsb; 3720 struct tsb *tsb;
3284 u8 *sense; 3721 u8 *sense, *rcq;
3285
3286 3722
3287 page = (char *) get_zeroed_page(GFP_ATOMIC); 3723 page = (char *) get_zeroed_page(GFP_ATOMIC);
3288 if (page == NULL) { 3724 if (page == NULL) {
@@ -3348,12 +3784,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3348 case 2: /* ts_ddpc */ 3784 case 2: /* ts_ddpc */
3349 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3785 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3350 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 3786 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3351 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3787 for (sl = 0; sl < 2; sl++) {
3352 " tsb->tsa.ddpc.rcq: "); 3788 len += sprintf(page + len,
3353 for (sl = 0; sl < 16; sl++) { 3789 KERN_ERR PRINTK_HEADER
3790 " tsb->tsa.ddpc.rcq %2d-%2d: ",
3791 (8 * sl), ((8 * sl) + 7));
3792 rcq = tsb->tsa.ddpc.rcq;
3354 for (sct = 0; sct < 8; sct++) { 3793 for (sct = 0; sct < 8; sct++) {
3355 len += sprintf(page + len, " %02x", 3794 len += sprintf(page + len, " %02x",
3356 tsb->tsa.ddpc.rcq[sl]); 3795 rcq[8 * sl + sct]);
3357 } 3796 }
3358 len += sprintf(page + len, "\n"); 3797 len += sprintf(page + len, "\n");
3359 } 3798 }
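Aside, not part of the patch text itself: the hunk above also fixes an indexing bug. The old loop printed tsb->tsa.ddpc.rcq[sl] eight times per row over sixteen rows, repeating each byte instead of stepping through it; the new code emits the sixteen bytes once each, as two labelled rows of eight:

    int sl, sct;
    for (sl = 0; sl < 2; sl++)            /* rows rcq 0-7 and rcq 8-15 */
            for (sct = 0; sct < 8; sct++)
                    ;                     /* prints rcq[8 * sl + sct] */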
@@ -3550,6 +3989,7 @@ static struct ccw_driver dasd_eckd_driver = {
3550 .set_offline = dasd_generic_set_offline, 3989 .set_offline = dasd_generic_set_offline,
3551 .set_online = dasd_eckd_set_online, 3990 .set_online = dasd_eckd_set_online,
3552 .notify = dasd_generic_notify, 3991 .notify = dasd_generic_notify,
3992 .path_event = dasd_generic_path_event,
3553 .freeze = dasd_generic_pm_freeze, 3993 .freeze = dasd_generic_pm_freeze,
3554 .thaw = dasd_generic_restore_device, 3994 .thaw = dasd_generic_restore_device,
3555 .restore = dasd_generic_restore_device, 3995 .restore = dasd_generic_restore_device,
@@ -3573,10 +4013,11 @@ static struct dasd_discipline dasd_eckd_discipline = {
3573 .owner = THIS_MODULE, 4013 .owner = THIS_MODULE,
3574 .name = "ECKD", 4014 .name = "ECKD",
3575 .ebcname = "ECKD", 4015 .ebcname = "ECKD",
3576 .max_blocks = 240, 4016 .max_blocks = 190,
3577 .check_device = dasd_eckd_check_characteristics, 4017 .check_device = dasd_eckd_check_characteristics,
3578 .uncheck_device = dasd_eckd_uncheck_device, 4018 .uncheck_device = dasd_eckd_uncheck_device,
3579 .do_analysis = dasd_eckd_do_analysis, 4019 .do_analysis = dasd_eckd_do_analysis,
4020 .verify_path = dasd_eckd_verify_path,
3580 .ready_to_online = dasd_eckd_ready_to_online, 4021 .ready_to_online = dasd_eckd_ready_to_online,
3581 .online_to_ready = dasd_eckd_online_to_ready, 4022 .online_to_ready = dasd_eckd_online_to_ready,
3582 .fill_geometry = dasd_eckd_fill_geometry, 4023 .fill_geometry = dasd_eckd_fill_geometry,
@@ -3586,7 +4027,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
3586 .format_device = dasd_eckd_format_device, 4027 .format_device = dasd_eckd_format_device,
3587 .erp_action = dasd_eckd_erp_action, 4028 .erp_action = dasd_eckd_erp_action,
3588 .erp_postaction = dasd_eckd_erp_postaction, 4029 .erp_postaction = dasd_eckd_erp_postaction,
3589 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt, 4030 .check_for_device_change = dasd_eckd_check_for_device_change,
3590 .build_cp = dasd_eckd_build_alias_cp, 4031 .build_cp = dasd_eckd_build_alias_cp,
3591 .free_cp = dasd_eckd_free_alias_cp, 4032 .free_cp = dasd_eckd_free_alias_cp,
3592 .dump_sense = dasd_eckd_dump_sense, 4033 .dump_sense = dasd_eckd_dump_sense,
@@ -3609,11 +4050,19 @@ dasd_eckd_init(void)
3609 GFP_KERNEL | GFP_DMA); 4050 GFP_KERNEL | GFP_DMA);
3610 if (!dasd_reserve_req) 4051 if (!dasd_reserve_req)
3611 return -ENOMEM; 4052 return -ENOMEM;
4053 path_verification_worker = kmalloc(sizeof(*path_verification_worker),
4054 GFP_KERNEL | GFP_DMA);
4055 if (!path_verification_worker) {
4056 kfree(dasd_reserve_req);
4057 return -ENOMEM;
4058 }
3612 ret = ccw_driver_register(&dasd_eckd_driver); 4059 ret = ccw_driver_register(&dasd_eckd_driver);
3613 if (!ret) 4060 if (!ret)
3614 wait_for_device_probe(); 4061 wait_for_device_probe();
3615 else 4062 else {
4063 kfree(path_verification_worker);
3616 kfree(dasd_reserve_req); 4064 kfree(dasd_reserve_req);
4065 }
3617 return ret; 4066 return ret;
3618} 4067}
3619 4068
@@ -3621,6 +4070,7 @@ static void __exit
3621dasd_eckd_cleanup(void) 4070dasd_eckd_cleanup(void)
3622{ 4071{
3623 ccw_driver_unregister(&dasd_eckd_driver); 4072 ccw_driver_unregister(&dasd_eckd_driver);
4073 kfree(path_verification_worker);
3624 kfree(dasd_reserve_req); 4074 kfree(dasd_reserve_req);
3625} 4075}
3626 4076
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 12097c24f2f5..4a688a873a77 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -37,14 +37,17 @@
37#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d 37#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
38#define DASD_ECKD_CCW_READ_KD_MT 0x8e 38#define DASD_ECKD_CCW_READ_KD_MT 0x8e
39#define DASD_ECKD_CCW_RELEASE 0x94 39#define DASD_ECKD_CCW_RELEASE 0x94
40#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
40#define DASD_ECKD_CCW_READ_CKD_MT 0x9e 41#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
41#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d 42#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
42#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5 43#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
43#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6 44#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
44#define DASD_ECKD_CCW_RESERVE 0xB4 45#define DASD_ECKD_CCW_RESERVE 0xB4
46#define DASD_ECKD_CCW_READ_TRACK 0xDE
45#define DASD_ECKD_CCW_PFX 0xE7 47#define DASD_ECKD_CCW_PFX 0xE7
46#define DASD_ECKD_CCW_PFX_READ 0xEA 48#define DASD_ECKD_CCW_PFX_READ 0xEA
47#define DASD_ECKD_CCW_RSCK 0xF9 49#define DASD_ECKD_CCW_RSCK 0xF9
50#define DASD_ECKD_CCW_RCD 0xFA
48 51
49/* 52/*
50 * Perform Subsystem Function / Sub-Orders 53 * Perform Subsystem Function / Sub-Orders
@@ -57,6 +60,11 @@
57 */ 60 */
58#define LV_COMPAT_CYL 0xFFFE 61#define LV_COMPAT_CYL 0xFFFE
59 62
63
64#define FCX_MAX_DATA_FACTOR 65536
65#define DASD_ECKD_RCD_DATA_SIZE 256
66
67
60/***************************************************************************** 68/*****************************************************************************
61 * SECTION: Type Definitions 69 * SECTION: Type Definitions
62 ****************************************************************************/ 70 ****************************************************************************/
@@ -331,12 +339,6 @@ struct dasd_gneq {
331 __u8 reserved2[22]; 339 __u8 reserved2[22];
332} __attribute__ ((packed)); 340} __attribute__ ((packed));
333 341
334struct dasd_eckd_path {
335 __u8 opm;
336 __u8 ppm;
337 __u8 npm;
338};
339
340struct dasd_rssd_features { 342struct dasd_rssd_features {
341 char feature[256]; 343 char feature[256];
342} __attribute__((packed)); 344} __attribute__((packed));
@@ -442,7 +444,6 @@ struct dasd_eckd_private {
442 struct vd_sneq *vdsneq; 444 struct vd_sneq *vdsneq;
443 struct dasd_gneq *gneq; 445 struct dasd_gneq *gneq;
444 446
445 struct dasd_eckd_path path_data;
446 struct eckd_count count_area[5]; 447 struct eckd_count count_area[5];
447 int init_cqr_status; 448 int init_cqr_status;
448 int uses_cdl; 449 int uses_cdl;
@@ -455,6 +456,8 @@ struct dasd_eckd_private {
455 struct alias_pav_group *pavgroup; 456 struct alias_pav_group *pavgroup;
456 struct alias_lcu *lcu; 457 struct alias_lcu *lcu;
457 int count; 458 int count;
459
460 u32 fcx_max_data;
458}; 461};
459 462
460 463
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 83b4615a3b62..77f778b7b070 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -473,6 +473,7 @@ int dasd_eer_enable(struct dasd_device *device)
473 cqr->retries = 255; 473 cqr->retries = 255;
474 cqr->expires = 10 * HZ; 474 cqr->expires = 10 * HZ;
475 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 475 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
476 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
476 477
477 ccw = cqr->cpaddr; 478 ccw = cqr->cpaddr;
478 ccw->cmd_code = DASD_ECKD_CCW_SNSS; 479 ccw->cmd_code = DASD_ECKD_CCW_SNSS;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 7656384a811d..0eafe2e421e7 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
96 DBF_DEV_EVENT(DBF_DEBUG, device, 96 DBF_DEV_EVENT(DBF_DEBUG, device,
97 "default ERP called (%i retries left)", 97 "default ERP called (%i retries left)",
98 cqr->retries); 98 cqr->retries);
99 cqr->lpm = LPM_ANYPATH; 99 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
100 cqr->lpm = device->path_data.opm;
100 cqr->status = DASD_CQR_FILLED; 101 cqr->status = DASD_CQR_FILLED;
101 } else { 102 } else {
102 pr_err("%s: default ERP has run out of retries and failed\n", 103 pr_err("%s: default ERP has run out of retries and failed\n",
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index bec5486e0e6d..be89b3a893da 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -73,6 +73,7 @@ static struct ccw_driver dasd_fba_driver = {
73 .set_offline = dasd_generic_set_offline, 73 .set_offline = dasd_generic_set_offline,
74 .set_online = dasd_fba_set_online, 74 .set_online = dasd_fba_set_online,
75 .notify = dasd_generic_notify, 75 .notify = dasd_generic_notify,
76 .path_event = dasd_generic_path_event,
76 .freeze = dasd_generic_pm_freeze, 77 .freeze = dasd_generic_pm_freeze,
77 .thaw = dasd_generic_restore_device, 78 .thaw = dasd_generic_restore_device,
78 .restore = dasd_generic_restore_device, 79 .restore = dasd_generic_restore_device,
@@ -164,6 +165,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
164 } 165 }
165 166
166 device->default_expires = DASD_EXPIRES; 167 device->default_expires = DASD_EXPIRES;
168 device->path_data.opm = LPM_ANYPATH;
167 169
168 readonly = dasd_device_is_ro(device); 170 readonly = dasd_device_is_ro(device);
169 if (readonly) 171 if (readonly)
@@ -231,24 +233,16 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
231 return NULL; 233 return NULL;
232} 234}
233 235
234static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, 236static void dasd_fba_check_for_device_change(struct dasd_device *device,
235 struct irb *irb) 237 struct dasd_ccw_req *cqr,
238 struct irb *irb)
236{ 239{
237 char mask; 240 char mask;
238 241
239 /* first of all check for state change pending interrupt */ 242 /* first of all check for state change pending interrupt */
240 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 243 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
241 if ((irb->scsw.cmd.dstat & mask) == mask) { 244 if ((irb->scsw.cmd.dstat & mask) == mask)
242 dasd_generic_handle_state_change(device); 245 dasd_generic_handle_state_change(device);
243 return;
244 }
245
246 /* check for unsolicited interrupts */
247 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
248 "unsolicited interrupt received");
249 device->discipline->dump_sense_dbf(device, irb, "unsolicited");
250 dasd_schedule_device_bh(device);
251 return;
252}; 246};
253 247
254static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, 248static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
@@ -596,13 +590,14 @@ static struct dasd_discipline dasd_fba_discipline = {
596 .max_blocks = 96, 590 .max_blocks = 96,
597 .check_device = dasd_fba_check_characteristics, 591 .check_device = dasd_fba_check_characteristics,
598 .do_analysis = dasd_fba_do_analysis, 592 .do_analysis = dasd_fba_do_analysis,
593 .verify_path = dasd_generic_verify_path,
599 .fill_geometry = dasd_fba_fill_geometry, 594 .fill_geometry = dasd_fba_fill_geometry,
600 .start_IO = dasd_start_IO, 595 .start_IO = dasd_start_IO,
601 .term_IO = dasd_term_IO, 596 .term_IO = dasd_term_IO,
602 .handle_terminated_request = dasd_fba_handle_terminated_request, 597 .handle_terminated_request = dasd_fba_handle_terminated_request,
603 .erp_action = dasd_fba_erp_action, 598 .erp_action = dasd_fba_erp_action,
604 .erp_postaction = dasd_fba_erp_postaction, 599 .erp_postaction = dasd_fba_erp_postaction,
605 .handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt, 600 .check_for_device_change = dasd_fba_check_for_device_change,
606 .build_cp = dasd_fba_build_cp, 601 .build_cp = dasd_fba_build_cp,
607 .free_cp = dasd_fba_free_cp, 602 .free_cp = dasd_fba_free_cp,
608 .dump_sense = dasd_fba_dump_sense, 603 .dump_sense = dasd_fba_dump_sense,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 500678d7116c..df9f6999411d 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -231,6 +231,11 @@ struct dasd_ccw_req {
231/* per dasd_ccw_req flags */ 231/* per dasd_ccw_req flags */
232#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ 232#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
233#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */ 233#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
234#define DASD_CQR_VERIFY_PATH 2 /* path verification request */
235#define DASD_CQR_ALLOW_SLOCK 3 /* Try this request even when lock was
236 * stolen. Should not be combined with
237 * DASD_CQR_FLAGS_USE_ERP
238 */
234 239
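Aside, not part of the patch: within this series DASD_CQR_ALLOW_SLOCK is set on the SNID request and on the EER SNSS request, i.e. on diagnostic requests that must still run after another system has stolen the device lock. The idiom, as used at both sites:

    clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);  /* no ERP, per the note above */
    set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);      /* may run despite a stolen lock */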
235/* Signature for error recovery functions. */ 240/* Signature for error recovery functions. */
236typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); 241typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -287,6 +292,14 @@ struct dasd_discipline {
287 int (*do_analysis) (struct dasd_block *); 292 int (*do_analysis) (struct dasd_block *);
288 293
289 /* 294 /*
295 * This function is called when new paths become available.
296 * Disciplines may use this callback to do necessary setup work,
297 * e.g. verify that the new path is compatible with the current
298 * configuration.
299 */
300 int (*verify_path)(struct dasd_device *, __u8);
301
302 /*
290 * Last things to do when a device is set online, and first things 303 * Last things to do when a device is set online, and first things
291 * when it is set offline. 304 * when it is set offline.
292 */ 305 */
@@ -325,9 +338,9 @@ struct dasd_discipline {
325 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, 338 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
326 struct irb *); 339 struct irb *);
327 void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *); 340 void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
328 341 void (*check_for_device_change) (struct dasd_device *,
329 void (*handle_unsolicited_interrupt) (struct dasd_device *, 342 struct dasd_ccw_req *,
330 struct irb *); 343 struct irb *);
331 344
332 /* i/o control functions. */ 345 /* i/o control functions. */
333 int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); 346 int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
@@ -362,6 +375,13 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
362#define DASD_EER_STATECHANGE 3 375#define DASD_EER_STATECHANGE 3
363#define DASD_EER_PPRCSUSPEND 4 376#define DASD_EER_PPRCSUSPEND 4
364 377
378struct dasd_path {
379 __u8 opm;
380 __u8 tbvpm;
381 __u8 ppm;
382 __u8 npm;
383};
384
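Aside, the mask semantics are inferred from how this patch uses the fields, so treat the naming as interpretation: opm holds the operational paths, tbvpm the paths still to be verified, and ppm/npm the preferred and non-preferred paths derived from the access states (0x03 and 0x02) in dasd_eckd_read_conf. A hypothetical snapshot:

    struct dasd_path p = {
            .opm   = 0xe0,    /* paths 0-2 usable for I/O */
            .tbvpm = 0x10,    /* path 3 awaiting verification */
            .ppm   = 0x80,    /* path 0 preferred; seeds cqr->lpm */
            .npm   = 0x60,    /* paths 1-2 non-preferred */
    };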
365struct dasd_device { 385struct dasd_device {
366 /* Block device stuff. */ 386 /* Block device stuff. */
367 struct dasd_block *block; 387 struct dasd_block *block;
@@ -377,6 +397,7 @@ struct dasd_device {
377 struct dasd_discipline *discipline; 397 struct dasd_discipline *discipline;
378 struct dasd_discipline *base_discipline; 398 struct dasd_discipline *base_discipline;
379 char *private; 399 char *private;
400 struct dasd_path path_data;
380 401
381 /* Device state and target state. */ 402 /* Device state and target state. */
382 int state, target; 403 int state, target;
@@ -456,6 +477,9 @@ struct dasd_block {
456 * confuse this with the user specified 477 * confuse this with the user specified
457 * read-only feature. 478 * read-only feature.
458 */ 479 */
480#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
481#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
482
459 483
460void dasd_put_device_wake(struct dasd_device *); 484void dasd_put_device_wake(struct dasd_device *);
461 485
@@ -620,10 +644,15 @@ void dasd_generic_remove (struct ccw_device *cdev);
620int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); 644int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
621int dasd_generic_set_offline (struct ccw_device *cdev); 645int dasd_generic_set_offline (struct ccw_device *cdev);
622int dasd_generic_notify(struct ccw_device *, int); 646int dasd_generic_notify(struct ccw_device *, int);
647int dasd_generic_last_path_gone(struct dasd_device *);
648int dasd_generic_path_operational(struct dasd_device *);
649
623void dasd_generic_handle_state_change(struct dasd_device *); 650void dasd_generic_handle_state_change(struct dasd_device *);
624int dasd_generic_pm_freeze(struct ccw_device *); 651int dasd_generic_pm_freeze(struct ccw_device *);
625int dasd_generic_restore_device(struct ccw_device *); 652int dasd_generic_restore_device(struct ccw_device *);
626enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); 653enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
654void dasd_generic_path_event(struct ccw_device *, int *);
655int dasd_generic_verify_path(struct dasd_device *, __u8);
627 656
628int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 657int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
629char *dasd_get_sense(struct irb *); 658char *dasd_get_sense(struct irb *);
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 40834f18754c..dcee3c5c8954 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -2,76 +2,85 @@ comment "S/390 character device drivers"
2 depends on S390 2 depends on S390
3 3
4config TN3270 4config TN3270
5 tristate "Support for locally attached 3270 terminals" 5 def_tristate y
6 prompt "Support for locally attached 3270 terminals"
6 depends on CCW 7 depends on CCW
7 help 8 help
8 Include support for IBM 3270 terminals. 9 Include support for IBM 3270 terminals.
9 10
10config TN3270_TTY 11config TN3270_TTY
11 tristate "Support for tty input/output on 3270 terminals" 12 def_tristate y
13 prompt "Support for tty input/output on 3270 terminals"
12 depends on TN3270 14 depends on TN3270
13 help 15 help
14 Include support for using an IBM 3270 terminal as a Linux tty. 16 Include support for using an IBM 3270 terminal as a Linux tty.
15 17
16config TN3270_FS 18config TN3270_FS
17 tristate "Support for fullscreen applications on 3270 terminals" 19 def_tristate m
20 prompt "Support for fullscreen applications on 3270 terminals"
18 depends on TN3270 21 depends on TN3270
19 help 22 help
20 Include support for fullscreen applications on an IBM 3270 terminal. 23 Include support for fullscreen applications on an IBM 3270 terminal.
21 24
22config TN3270_CONSOLE 25config TN3270_CONSOLE
23 bool "Support for console on 3270 terminal" 26 def_bool y
27 prompt "Support for console on 3270 terminal"
24 depends on TN3270=y && TN3270_TTY=y 28 depends on TN3270=y && TN3270_TTY=y
25 help 29 help
26 Include support for using an IBM 3270 terminal as a Linux system 30 Include support for using an IBM 3270 terminal as a Linux system
27 console. Available only if 3270 support is compiled in statically. 31 console. Available only if 3270 support is compiled in statically.
28 32
29config TN3215 33config TN3215
30 bool "Support for 3215 line mode terminal" 34 def_bool y
35 prompt "Support for 3215 line mode terminal"
31 depends on CCW 36 depends on CCW
32 help 37 help
33 Include support for IBM 3215 line-mode terminals. 38 Include support for IBM 3215 line-mode terminals.
34 39
35config TN3215_CONSOLE 40config TN3215_CONSOLE
36 bool "Support for console on 3215 line mode terminal" 41 def_bool y
42 prompt "Support for console on 3215 line mode terminal"
37 depends on TN3215 43 depends on TN3215
38 help 44 help
39 Include support for using an IBM 3215 line-mode terminal as a 45 Include support for using an IBM 3215 line-mode terminal as a
40 Linux system console. 46 Linux system console.
41 47
42config CCW_CONSOLE 48config CCW_CONSOLE
43 bool 49 def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
44 depends on TN3215_CONSOLE || TN3270_CONSOLE
45 default y
46 50
47config SCLP_TTY 51config SCLP_TTY
48 bool "Support for SCLP line mode terminal" 52 def_bool y
53 prompt "Support for SCLP line mode terminal"
49 depends on S390 54 depends on S390
50 help 55 help
51 Include support for IBM SCLP line-mode terminals. 56 Include support for IBM SCLP line-mode terminals.
52 57
53config SCLP_CONSOLE 58config SCLP_CONSOLE
54 bool "Support for console on SCLP line mode terminal" 59 def_bool y
60 prompt "Support for console on SCLP line mode terminal"
55 depends on SCLP_TTY 61 depends on SCLP_TTY
56 help 62 help
57 Include support for using an IBM HWC line-mode terminal as the Linux 63 Include support for using an IBM HWC line-mode terminal as the Linux
58 system console. 64 system console.
59 65
60config SCLP_VT220_TTY 66config SCLP_VT220_TTY
61 bool "Support for SCLP VT220-compatible terminal" 67 def_bool y
68 prompt "Support for SCLP VT220-compatible terminal"
62 depends on S390 69 depends on S390
63 help 70 help
64 Include support for an IBM SCLP VT220-compatible terminal. 71 Include support for an IBM SCLP VT220-compatible terminal.
65 72
66config SCLP_VT220_CONSOLE 73config SCLP_VT220_CONSOLE
67 bool "Support for console on SCLP VT220-compatible terminal" 74 def_bool y
75 prompt "Support for console on SCLP VT220-compatible terminal"
68 depends on SCLP_VT220_TTY 76 depends on SCLP_VT220_TTY
69 help 77 help
70 Include support for using an IBM SCLP VT220-compatible terminal as a 78 Include support for using an IBM SCLP VT220-compatible terminal as a
71 Linux system console. 79 Linux system console.
72 80
73config SCLP_CPI 81config SCLP_CPI
74 tristate "Control-Program Identification" 82 def_tristate m
83 prompt "Control-Program Identification"
75 depends on S390 84 depends on S390
76 help 85 help
77 This option enables the hardware console interface for system 86 This option enables the hardware console interface for system
@@ -83,7 +92,8 @@ config SCLP_CPI
83 need this feature and intend to run your kernel in LPAR. 92 need this feature and intend to run your kernel in LPAR.
84 93
85config SCLP_ASYNC 94config SCLP_ASYNC
86 tristate "Support for Call Home via Asynchronous SCLP Records" 95 def_tristate m
96 prompt "Support for Call Home via Asynchronous SCLP Records"
87 depends on S390 97 depends on S390
88 help 98 help
89 This option enables the call home function, which is able to inform 99 This option enables the call home function, which is able to inform
@@ -93,7 +103,8 @@ config SCLP_ASYNC
93 need this feature and intend to run your kernel in LPAR. 103 need this feature and intend to run your kernel in LPAR.
94 104
95config S390_TAPE 105config S390_TAPE
96 tristate "S/390 tape device support" 106 def_tristate m
107 prompt "S/390 tape device support"
97 depends on CCW 108 depends on CCW
98 help 109 help
99 Select this option if you want to access channel-attached tape 110 Select this option if you want to access channel-attached tape
@@ -109,7 +120,8 @@ comment "S/390 tape interface support"
109 depends on S390_TAPE 120 depends on S390_TAPE
110 121
111config S390_TAPE_BLOCK 122config S390_TAPE_BLOCK
112 bool "Support for tape block devices" 123 def_bool y
124 prompt "Support for tape block devices"
113 depends on S390_TAPE && BLOCK 125 depends on S390_TAPE && BLOCK
114 help 126 help
115 Select this option if you want to access your channel-attached tape 127 Select this option if you want to access your channel-attached tape
@@ -123,7 +135,8 @@ comment "S/390 tape hardware support"
123 depends on S390_TAPE 135 depends on S390_TAPE
124 136
125config S390_TAPE_34XX 137config S390_TAPE_34XX
126 tristate "Support for 3480/3490 tape hardware" 138 def_tristate m
139 prompt "Support for 3480/3490 tape hardware"
127 depends on S390_TAPE 140 depends on S390_TAPE
128 help 141 help
129 Select this option if you want to access IBM 3480/3490 magnetic 142 Select this option if you want to access IBM 3480/3490 magnetic
@@ -131,7 +144,8 @@ config S390_TAPE_34XX
131 It is safe to say "Y" here. 144 It is safe to say "Y" here.
132 145
133config S390_TAPE_3590 146config S390_TAPE_3590
134 tristate "Support for 3590 tape hardware" 147 def_tristate m
148 prompt "Support for 3590 tape hardware"
135 depends on S390_TAPE 149 depends on S390_TAPE
136 help 150 help
137 Select this option if you want to access IBM 3590 magnetic 151 Select this option if you want to access IBM 3590 magnetic
@@ -139,7 +153,8 @@ config S390_TAPE_3590
139 It is safe to say "Y" here. 153 It is safe to say "Y" here.
140 154
141config VMLOGRDR 155config VMLOGRDR
142 tristate "Support for the z/VM recording system services (VM only)" 156 def_tristate m
157 prompt "Support for the z/VM recording system services (VM only)"
143 depends on IUCV 158 depends on IUCV
144 help 159 help
145 Select this option if you want to be able to receive records collected 160 Select this option if you want to be able to receive records collected
@@ -148,29 +163,31 @@ config VMLOGRDR
148 This driver depends on the IUCV support driver. 163 This driver depends on the IUCV support driver.
149 164
150config VMCP 165config VMCP
151 bool "Support for the z/VM CP interface" 166 def_bool y
167 prompt "Support for the z/VM CP interface"
152 depends on S390 168 depends on S390
153 help 169 help
154 Select this option if you want to be able to interact with the control 170 Select this option if you want to be able to interact with the control
155 program on z/VM 171 program on z/VM
156 172
157config MONREADER 173config MONREADER
158 tristate "API for reading z/VM monitor service records" 174 def_tristate m
175 prompt "API for reading z/VM monitor service records"
159 depends on IUCV 176 depends on IUCV
160 help 177 help
161 Character device driver for reading z/VM monitor service records 178 Character device driver for reading z/VM monitor service records
162 179
163config MONWRITER 180config MONWRITER
164 tristate "API for writing z/VM monitor service records" 181 def_tristate m
182 prompt "API for writing z/VM monitor service records"
165 depends on S390 183 depends on S390
166 default "m"
167 help 184 help
168 Character device driver for writing z/VM monitor service records 185 Character device driver for writing z/VM monitor service records
169 186
170config S390_VMUR 187config S390_VMUR
171 tristate "z/VM unit record device driver" 188 def_tristate m
189 prompt "z/VM unit record device driver"
172 depends on S390 190 depends on S390
173 default "m"
174 help 191 help
175 Character device driver for z/VM reader, puncher and printer. 192 Character device driver for z/VM reader, puncher and printer.
176 193
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 59ec073724bf..3fb4335d491d 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,6 +9,7 @@
9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> 9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
10 */ 10 */
11 11
12#include <linux/kernel_stat.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kdev_t.h> 15#include <linux/kdev_t.h>
@@ -361,6 +362,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
361 int cstat, dstat; 362 int cstat, dstat;
362 int count; 363 int count;
363 364
365 kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
364 raw = dev_get_drvdata(&cdev->dev); 366 raw = dev_get_drvdata(&cdev->dev);
365 req = (struct raw3215_req *) intparm; 367 req = (struct raw3215_req *) intparm;
366 cstat = irb->scsw.cmd.cstat; 368 cstat = irb->scsw.cmd.cstat;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 2a4c566456e7..96ba2fd1c8ad 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,6 +7,7 @@
7 * Copyright IBM Corp. 2003, 2009 7 * Copyright IBM Corp. 2003, 2009
8 */ 8 */
9 9
10#include <linux/kernel_stat.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/err.h> 12#include <linux/err.h>
12#include <linux/init.h> 13#include <linux/init.h>
@@ -329,6 +330,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
329 struct raw3270_request *rq; 330 struct raw3270_request *rq;
330 int rc; 331 int rc;
331 332
333 kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
332 rp = dev_get_drvdata(&cdev->dev); 334 rp = dev_get_drvdata(&cdev->dev);
333 if (!rp) 335 if (!rp)
334 return; 336 return;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 35cc4686b99b..b76c61f82485 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -7,6 +7,7 @@
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/kernel_stat.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/err.h> 12#include <linux/err.h>
12#include <linux/spinlock.h> 13#include <linux/spinlock.h>
@@ -18,16 +19,14 @@
18#include <linux/suspend.h> 19#include <linux/suspend.h>
19#include <linux/completion.h> 20#include <linux/completion.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <asm/types.h>
22#include <asm/s390_ext.h> 22#include <asm/s390_ext.h>
23#include <asm/types.h>
24#include <asm/irq.h>
23 25
24#include "sclp.h" 26#include "sclp.h"
25 27
26#define SCLP_HEADER "sclp: " 28#define SCLP_HEADER "sclp: "
27 29
28/* Structure for register_early_external_interrupt. */
29static ext_int_info_t ext_int_info_hwc;
30
31/* Lock to protect internal data consistency. */ 30/* Lock to protect internal data consistency. */
32static DEFINE_SPINLOCK(sclp_lock); 31static DEFINE_SPINLOCK(sclp_lock);
33 32
@@ -402,6 +401,7 @@ static void sclp_interrupt_handler(unsigned int ext_int_code,
402 u32 finished_sccb; 401 u32 finished_sccb;
403 u32 evbuf_pending; 402 u32 evbuf_pending;
404 403
404 kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
405 spin_lock(&sclp_lock); 405 spin_lock(&sclp_lock);
406 finished_sccb = param32 & 0xfffffff8; 406 finished_sccb = param32 & 0xfffffff8;
407 evbuf_pending = param32 & 0x3; 407 evbuf_pending = param32 & 0x3;
@@ -824,6 +824,7 @@ static void sclp_check_handler(unsigned int ext_int_code,
824{ 824{
825 u32 finished_sccb; 825 u32 finished_sccb;
826 826
827 kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
827 finished_sccb = param32 & 0xfffffff8; 828 finished_sccb = param32 & 0xfffffff8;
828 /* Is this the interrupt we are waiting for? */ 829 /* Is this the interrupt we are waiting for? */
829 if (finished_sccb == 0) 830 if (finished_sccb == 0)
@@ -866,8 +867,7 @@ sclp_check_interface(void)
866 867
867 spin_lock_irqsave(&sclp_lock, flags); 868 spin_lock_irqsave(&sclp_lock, flags);
868 /* Prepare init mask command */ 869 /* Prepare init mask command */
869 rc = register_early_external_interrupt(0x2401, sclp_check_handler, 870 rc = register_external_interrupt(0x2401, sclp_check_handler);
870 &ext_int_info_hwc);
871 if (rc) { 871 if (rc) {
872 spin_unlock_irqrestore(&sclp_lock, flags); 872 spin_unlock_irqrestore(&sclp_lock, flags);
873 return rc; 873 return rc;
@@ -900,8 +900,7 @@ sclp_check_interface(void)
900 } else 900 } else
901 rc = -EBUSY; 901 rc = -EBUSY;
902 } 902 }
903 unregister_early_external_interrupt(0x2401, sclp_check_handler, 903 unregister_external_interrupt(0x2401, sclp_check_handler);
904 &ext_int_info_hwc);
905 spin_unlock_irqrestore(&sclp_lock, flags); 904 spin_unlock_irqrestore(&sclp_lock, flags);
906 return rc; 905 return rc;
907} 906}
@@ -1064,8 +1063,7 @@ sclp_init(void)
1064 if (rc) 1063 if (rc)
1065 goto fail_init_state_uninitialized; 1064 goto fail_init_state_uninitialized;
1066 /* Register interrupt handler */ 1065 /* Register interrupt handler */
1067 rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler, 1066 rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
1068 &ext_int_info_hwc);
1069 if (rc) 1067 if (rc)
1070 goto fail_unregister_reboot_notifier; 1068 goto fail_unregister_reboot_notifier;
1071 sclp_init_state = sclp_init_state_initialized; 1069 sclp_init_state = sclp_init_state_initialized;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index b497afe061cc..16e232a99fb7 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -33,6 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
33 int cpu; 33 int cpu;
34 struct sys_device *sysdev; 34 struct sys_device *sysdev;
35 35
36 s390_adjust_jiffies();
36 pr_warning("cpu capability changed.\n"); 37 pr_warning("cpu capability changed.\n");
37 get_online_cpus(); 38 get_online_cpus();
38 for_each_online_cpu(cpu) { 39 for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index b3a3e8e8656e..7978a0adeaf3 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -14,6 +14,7 @@
14#define KMSG_COMPONENT "tape" 14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 16
17#include <linux/kernel_stat.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> // for kernel parameters 19#include <linux/init.h> // for kernel parameters
19#include <linux/kmod.h> // for requesting modules 20#include <linux/kmod.h> // for requesting modules
@@ -1114,6 +1115,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1114 struct tape_request *request; 1115 struct tape_request *request;
1115 int rc; 1116 int rc;
1116 1117
1118 kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
1117 device = dev_get_drvdata(&cdev->dev); 1119 device = dev_get_drvdata(&cdev->dev);
1118 if (device == NULL) { 1120 if (device == NULL) {
1119 return; 1121 return;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index f7e4ae6bf15a..caef1757341d 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -11,6 +11,7 @@
11#define KMSG_COMPONENT "vmur" 11#define KMSG_COMPONENT "vmur"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 13
14#include <linux/kernel_stat.h>
14#include <linux/cdev.h> 15#include <linux/cdev.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16 17
@@ -302,6 +303,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
302{ 303{
303 struct urdev *urd; 304 struct urdev *urd;
304 305
306 kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
305 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", 307 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
306 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, 308 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
307 irb->scsw.cmd.count); 309 irb->scsw.cmd.count);
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 97b25d68e3e7..2864581d8ecb 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -67,6 +67,27 @@ __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
67} 67}
68 68
69/* 69/*
70 * Remove references from ccw devices to ccw group device and from
71 * ccw group device to ccw devices.
72 */
73static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
74{
75 struct ccw_device *cdev;
76 int i;
77
78 for (i = 0; i < gdev->count; i++) {
79 cdev = gdev->cdev[i];
80 if (!cdev)
81 continue;
82 spin_lock_irq(cdev->ccwlock);
83 dev_set_drvdata(&cdev->dev, NULL);
84 spin_unlock_irq(cdev->ccwlock);
85 gdev->cdev[i] = NULL;
86 put_device(&cdev->dev);
87 }
88}
89
90/*
70 * Provide an 'ungroup' attribute so the user can remove group devices no 91 * Provide an 'ungroup' attribute so the user can remove group devices no
71 * longer needed or accidentally created. Saves memory :) 92 * longer needed or accidentally created. Saves memory :)
72 */ 93 */
@@ -78,6 +99,7 @@ static void ccwgroup_ungroup_callback(struct device *dev)
78 if (device_is_registered(&gdev->dev)) { 99 if (device_is_registered(&gdev->dev)) {
79 __ccwgroup_remove_symlinks(gdev); 100 __ccwgroup_remove_symlinks(gdev);
80 device_unregister(dev); 101 device_unregister(dev);
102 __ccwgroup_remove_cdev_refs(gdev);
81 } 103 }
82 mutex_unlock(&gdev->reg_mutex); 104 mutex_unlock(&gdev->reg_mutex);
83} 105}
@@ -116,21 +138,7 @@ static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
116static void 138static void
117ccwgroup_release (struct device *dev) 139ccwgroup_release (struct device *dev)
118{ 140{
119 struct ccwgroup_device *gdev; 141 kfree(to_ccwgroupdev(dev));
120 int i;
121
122 gdev = to_ccwgroupdev(dev);
123
124 for (i = 0; i < gdev->count; i++) {
125 if (gdev->cdev[i]) {
126 spin_lock_irq(gdev->cdev[i]->ccwlock);
127 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
128 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
129 spin_unlock_irq(gdev->cdev[i]->ccwlock);
130 put_device(&gdev->cdev[i]->dev);
131 }
132 }
133 kfree(gdev);
134} 142}
135 143
136static int 144static int
@@ -639,6 +647,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
639 mutex_lock(&gdev->reg_mutex); 647 mutex_lock(&gdev->reg_mutex);
640 __ccwgroup_remove_symlinks(gdev); 648 __ccwgroup_remove_symlinks(gdev);
641 device_unregister(dev); 649 device_unregister(dev);
650 __ccwgroup_remove_cdev_refs(gdev);
642 mutex_unlock(&gdev->reg_mutex); 651 mutex_unlock(&gdev->reg_mutex);
643 put_device(dev); 652 put_device(dev);
644 } 653 }
@@ -660,25 +669,6 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
660 return 0; 669 return 0;
661} 670}
662 671
663static struct ccwgroup_device *
664__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
665{
666 struct ccwgroup_device *gdev;
667
668 gdev = dev_get_drvdata(&cdev->dev);
669 if (gdev) {
670 if (get_device(&gdev->dev)) {
671 mutex_lock(&gdev->reg_mutex);
672 if (device_is_registered(&gdev->dev))
673 return gdev;
674 mutex_unlock(&gdev->reg_mutex);
675 put_device(&gdev->dev);
676 }
677 return NULL;
678 }
679 return NULL;
680}
681
682/** 672/**
683 * ccwgroup_remove_ccwdev() - remove function for slave devices 673 * ccwgroup_remove_ccwdev() - remove function for slave devices
684 * @cdev: ccw device to be removed 674 * @cdev: ccw device to be removed
@@ -694,13 +684,25 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
694 /* Ignore offlining errors, device is gone anyway. */ 684 /* Ignore offlining errors, device is gone anyway. */
695 ccw_device_set_offline(cdev); 685 ccw_device_set_offline(cdev);
696 /* If one of its devices is gone, the whole group is done for. */ 686 /* If one of its devices is gone, the whole group is done for. */
697 gdev = __ccwgroup_get_gdev_by_cdev(cdev); 687 spin_lock_irq(cdev->ccwlock);
698 if (gdev) { 688 gdev = dev_get_drvdata(&cdev->dev);
689 if (!gdev) {
690 spin_unlock_irq(cdev->ccwlock);
691 return;
692 }
693 /* Get ccwgroup device reference for local processing. */
694 get_device(&gdev->dev);
695 spin_unlock_irq(cdev->ccwlock);
696 /* Unregister group device. */
697 mutex_lock(&gdev->reg_mutex);
698 if (device_is_registered(&gdev->dev)) {
699 __ccwgroup_remove_symlinks(gdev); 699 __ccwgroup_remove_symlinks(gdev);
700 device_unregister(&gdev->dev); 700 device_unregister(&gdev->dev);
701 mutex_unlock(&gdev->reg_mutex); 701 __ccwgroup_remove_cdev_refs(gdev);
702 put_device(&gdev->dev);
703 } 702 }
703 mutex_unlock(&gdev->reg_mutex);
704 /* Release ccwgroup device reference for local processing. */
705 put_device(&gdev->dev);
704} 706}
705 707
706MODULE_LICENSE("GPL"); 708MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1aaddea673e0..0689fcf23a11 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -695,6 +695,25 @@ out:
695 return ret; 695 return ret;
696} 696}
697 697
698int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
699 struct channel_path_desc_fmt1 *desc)
700{
701 struct chsc_response_struct *chsc_resp;
702 struct chsc_scpd *scpd_area;
703 int ret;
704
705 spin_lock_irq(&chsc_page_lock);
706 scpd_area = chsc_page;
707 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
708 if (ret)
709 goto out;
710 chsc_resp = (void *)&scpd_area->response;
711 memcpy(desc, &chsc_resp->data, sizeof(*desc));
712out:
713 spin_unlock_irq(&chsc_page_lock);
714 return ret;
715}
716
698static void 717static void
699chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, 718chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
700 struct cmg_chars *chars) 719 struct cmg_chars *chars)
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 6693f5e3176f..3f15b2aaeaea 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -35,6 +35,22 @@ struct channel_path_desc {
35 u8 chpp; 35 u8 chpp;
36} __attribute__ ((packed)); 36} __attribute__ ((packed));
37 37
38struct channel_path_desc_fmt1 {
39 u8 flags;
40 u8 lsn;
41 u8 desc;
42 u8 chpid;
43 u32:24;
44 u8 chpp;
45 u32 unused[3];
46 u16 mdc;
47 u16:13;
48 u8 r:1;
49 u8 s:1;
50 u8 f:1;
51 u32 zeros[2];
52} __attribute__ ((packed));
53
38struct channel_path; 54struct channel_path;
39 55
40struct css_chsc_char { 56struct css_chsc_char {
@@ -92,6 +108,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
92 int c, int m, void *page); 108 int c, int m, void *page);
93int chsc_determine_base_channel_path_desc(struct chp_id chpid, 109int chsc_determine_base_channel_path_desc(struct chp_id chpid,
94 struct channel_path_desc *desc); 110 struct channel_path_desc *desc);
111int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
112 struct channel_path_desc_fmt1 *desc);
95void chsc_chp_online(struct chp_id chpid); 113void chsc_chp_online(struct chp_id chpid);
96void chsc_chp_offline(struct chp_id chpid); 114void chsc_chp_offline(struct chp_id chpid);
97int chsc_get_channel_measurement_chars(struct channel_path *chp); 115int chsc_get_channel_measurement_chars(struct channel_path *chp);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 825951b6b83f..24d8e97355b9 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -618,6 +618,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
618static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 618static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
619{ 619{
620 struct subchannel_id mchk_schid; 620 struct subchannel_id mchk_schid;
621 struct subchannel *sch;
621 622
622 if (overflow) { 623 if (overflow) {
623 css_schedule_eval_all(); 624 css_schedule_eval_all();
@@ -637,6 +638,13 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
637 if (crw1) 638 if (crw1)
638 mchk_schid.ssid = (crw1->rsid >> 4) & 3; 639 mchk_schid.ssid = (crw1->rsid >> 4) & 3;
639 640
641 if (crw0->erc == CRW_ERC_PMOD) {
642 sch = get_subchannel_by_schid(mchk_schid);
643 if (sch) {
644 css_update_ssd_info(sch);
645 put_device(&sch->dev);
646 }
647 }
640 /* 648 /*
641 * Since we are always presented with IPI in the CRW, we have to 649 * Since we are always presented with IPI in the CRW, we have to
642 * use stsch() to find out if the subchannel in question has come 650 * use stsch() to find out if the subchannel in question has come
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 6da84543dfe9..651976b54af8 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -687,6 +687,46 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
687EXPORT_SYMBOL(ccw_device_tm_start_timeout); 687EXPORT_SYMBOL(ccw_device_tm_start_timeout);
688 688
689/** 689/**
690 * ccw_device_get_mdc - accumulate max data count
691 * @cdev: ccw device for which the max data count is accumulated
692 * @mask: mask of paths to use
693 *
694 * Return the number of 64K-byte blocks that all paths support at a
695 * minimum for a transport command. Return values <= 0 indicate failures.
696 */
697int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
698{
699 struct subchannel *sch = to_subchannel(cdev->dev.parent);
700 struct channel_path_desc_fmt1 desc;
701 struct chp_id chpid;
702 int mdc = 0, ret, i;
703
704 /* Adjust requested path mask to exclude varied-off paths. */
705 if (mask)
706 mask &= sch->lpm;
707 else
708 mask = sch->lpm;
709
710 chp_id_init(&chpid);
711 for (i = 0; i < 8; i++) {
712 if (!(mask & (0x80 >> i)))
713 continue;
714 chpid.id = sch->schib.pmcw.chpid[i];
715 ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
716 if (ret)
717 return ret;
718 if (!desc.f)
719 return 0;
720 if (!desc.r)
721 mdc = 1;
722 mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
723 }
724
725 return mdc;
726}
727EXPORT_SYMBOL(ccw_device_get_mdc);
728
729/**
690 * ccw_device_tm_intrg - perform interrogate function 730 * ccw_device_tm_intrg - perform interrogate function
691 * @cdev: ccw device on which to perform the interrogate function 731 * @cdev: ccw device on which to perform the interrogate function
692 * 732 *
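ccw_device_get_mdc() above is essentially a guarded minimum over the fmt-1 descriptors of the usable paths. A hedged userspace rendering of that fold follows; the per-path f/r/mdc values are invented test data, not CHSC output.

    /* Hedged sketch of the mdc accumulation in ccw_device_get_mdc().
     * The descriptor values are invented. */
    #include <stdio.h>

    struct desc_fmt1 { int f; int r; unsigned short mdc; };

    static int accumulate_mdc(const struct desc_fmt1 *p, int n)
    {
        int mdc = 0, i;

        for (i = 0; i < n; i++) {
            if (!p[i].f)          /* no valid fmt-1 data: report 0 */
                return 0;
            if (!p[i].r)          /* kernel clamps to 1 if r is unset */
                mdc = 1;
            mdc = mdc ? (mdc < p[i].mdc ? mdc : (int)p[i].mdc)
                      : p[i].mdc;
        }
        return mdc;
    }

    int main(void)
    {
        struct desc_fmt1 paths[] = { {1, 1, 32}, {1, 1, 16}, {1, 1, 64} };

        printf("mdc = %d\n", accumulate_mdc(paths, 3)); /* prints 16 */
        return 0;
    }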
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index a0ae29564774..358ee16d10a2 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -93,6 +93,7 @@ EXPORT_SYMBOL(itcw_get_tcw);
93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) 93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
94{ 94{
95 size_t len; 95 size_t len;
96 int cross_count;
96 97
97 /* Main data. */ 98 /* Main data. */
98 len = sizeof(struct itcw); 99 len = sizeof(struct itcw);
@@ -105,12 +106,27 @@ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
105 /* TSB */ sizeof(struct tsb) + 106 /* TSB */ sizeof(struct tsb) +
106 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); 107 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
107 } 108 }
109
108 /* Maximum required alignment padding. */ 110 /* Maximum required alignment padding. */
109 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; 111 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
110 /* Maximum padding for structures that may not cross 4k boundary. */ 112
111 if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) 113 /* TIDAW lists may not cross a 4k boundary. To cross a
112 len += max(max_tidaws, intrg_max_tidaws) * 114 * boundary we need to add a TTIC TIDAW. We need to reserve
113 sizeof(struct tidaw) - 1; 115 * one additional TIDAW for a TTIC that we may need to add due
116 * to the placement of the data chunk in memory, and a further
117 * TIDAW for each page boundary that the TIDAW list may cross
118 * due to its own size.
119 */
120 if (max_tidaws) {
121 cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
122 >> PAGE_SHIFT);
123 len += cross_count * sizeof(struct tidaw);
124 }
125 if (intrg_max_tidaws) {
126 cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
127 >> PAGE_SHIFT);
128 len += cross_count * sizeof(struct tidaw);
129 }
114 return len; 130 return len;
115} 131}
116EXPORT_SYMBOL(itcw_calc_size); 132EXPORT_SYMBOL(itcw_calc_size);
@@ -165,6 +181,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
165 void *chunk; 181 void *chunk;
166 addr_t start; 182 addr_t start;
167 addr_t end; 183 addr_t end;
184 int cross_count;
168 185
169 /* Check for 2G limit. */ 186 /* Check for 2G limit. */
170 start = (addr_t) buffer; 187 start = (addr_t) buffer;
@@ -177,8 +194,17 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
177 if (IS_ERR(chunk)) 194 if (IS_ERR(chunk))
178 return chunk; 195 return chunk;
179 itcw = chunk; 196 itcw = chunk;
180 itcw->max_tidaws = max_tidaws; 197 /* allow for TTIC tidaws that may be needed to cross a page boundary */
181 itcw->intrg_max_tidaws = intrg_max_tidaws; 198 cross_count = 0;
199 if (max_tidaws)
200 cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
201 >> PAGE_SHIFT);
202 itcw->max_tidaws = max_tidaws + cross_count;
203 cross_count = 0;
204 if (intrg_max_tidaws)
205 cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
206 >> PAGE_SHIFT);
207 itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
182 /* Main TCW. */ 208 /* Main TCW. */
183 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); 209 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
184 if (IS_ERR(chunk)) 210 if (IS_ERR(chunk))
@@ -198,7 +224,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
198 /* Data TIDAL. */ 224 /* Data TIDAL. */
199 if (max_tidaws > 0) { 225 if (max_tidaws > 0) {
200 chunk = fit_chunk(&start, end, sizeof(struct tidaw) * 226 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
201 max_tidaws, 16, 1); 227 itcw->max_tidaws, 16, 0);
202 if (IS_ERR(chunk)) 228 if (IS_ERR(chunk))
203 return chunk; 229 return chunk;
204 tcw_set_data(itcw->tcw, chunk, 1); 230 tcw_set_data(itcw->tcw, chunk, 1);
@@ -206,7 +232,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
206 /* Interrogate data TIDAL. */ 232 /* Interrogate data TIDAL. */
207 if (intrg && (intrg_max_tidaws > 0)) { 233 if (intrg && (intrg_max_tidaws > 0)) {
208 chunk = fit_chunk(&start, end, sizeof(struct tidaw) * 234 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
209 intrg_max_tidaws, 16, 1); 235 itcw->intrg_max_tidaws, 16, 0);
210 if (IS_ERR(chunk)) 236 if (IS_ERR(chunk))
211 return chunk; 237 return chunk;
212 tcw_set_data(itcw->intrg_tcw, chunk, 1); 238 tcw_set_data(itcw->intrg_tcw, chunk, 1);
@@ -283,13 +309,29 @@ EXPORT_SYMBOL(itcw_add_dcw);
283 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the 309 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
284 * available space. 310 * available space.
285 * 311 *
286 * Note: the tidaw-list is assumed to be contiguous with no ttics. The 312 * Note: TTIC tidaws are automatically added when needed, so explicitly calling
287 * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. 313 * this interface with the TTIC flag is not supported. The last-tidaw flag
314 * for the last tidaw in the list will be set by itcw_finalize.
288 */ 315 */
289struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) 316struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
290{ 317{
318 struct tidaw *following;
319
291 if (itcw->num_tidaws >= itcw->max_tidaws) 320 if (itcw->num_tidaws >= itcw->max_tidaws)
292 return ERR_PTR(-ENOSPC); 321 return ERR_PTR(-ENOSPC);
322 /*
323 * Is the tidaw, which follows the one we are about to fill, on the next
324 * page? Then we first have to insert a TTIC tidaw that points to the
325 * tidaw on the new page.
326 */
327 following = ((struct tidaw *) tcw_get_data(itcw->tcw))
328 + itcw->num_tidaws + 1;
329 if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
330 tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
331 TIDAW_FLAGS_TTIC, following, 0);
332 if (itcw->num_tidaws >= itcw->max_tidaws)
333 return ERR_PTR(-ENOSPC);
334 }
293 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); 335 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
294} 336}
295EXPORT_SYMBOL(itcw_add_tidaw); 337EXPORT_SYMBOL(itcw_add_tidaw);
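To see when the new code inserts one of those TTIC entries, here is a hedged stand-alone simulation (plain C; the list base address and the 16-byte entry size are invented) of the check at the top of itcw_add_tidaw(): a TTIC is written into the current slot exactly when the slot after the one being filled is the first slot of a new 4K page.

    /* Hedged sketch of the TTIC insertion rule in itcw_add_tidaw().
     * Addresses are simulated; this is not kernel code. */
    #include <stdio.h>

    #define PAGE_MASK  (~0xfffUL)
    #define TIDAW_SIZE 16UL      /* assumed sizeof(struct tidaw) */

    int main(void)
    {
        unsigned long base = 0x10000fd0UL; /* invented list start */
        int num = 0, i;

        for (i = 0; i < 4; i++) {
            unsigned long following = base + (num + 1) * TIDAW_SIZE;

            /* same test as the kernel: the next slot is page-aligned */
            if (num && !(following & ~PAGE_MASK)) {
                printf("slot %d: TTIC -> %#lx\n", num, following);
                num++;
            }
            printf("slot %d: data tidaw %d\n", num, i);
            num++;
        }
        return 0;
    }

With this base address the third data tidaw would start a new page, so a TTIC is emitted into slot 2 and the data lands in the page-aligned slot 3.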
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 0f4ef8769a3d..7bc643f3f5ab 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -91,6 +91,12 @@ enum qdio_irq_states {
91#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ 91#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
92#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ 92#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
93 93
94/* SIGA flags */
95#define QDIO_SIGA_WRITE 0x00
96#define QDIO_SIGA_READ 0x01
97#define QDIO_SIGA_SYNC 0x02
98#define QDIO_SIGA_QEBSM_FLAG 0x80
99
94#ifdef CONFIG_64BIT 100#ifdef CONFIG_64BIT
95static inline int do_sqbs(u64 token, unsigned char state, int queue, 101static inline int do_sqbs(u64 token, unsigned char state, int queue,
96 int *start, int *count) 102 int *start, int *count)
@@ -142,10 +148,9 @@ struct siga_flag {
142 u8 input:1; 148 u8 input:1;
143 u8 output:1; 149 u8 output:1;
144 u8 sync:1; 150 u8 sync:1;
145 u8 no_sync_ti:1; 151 u8 sync_after_ai:1;
146 u8 no_sync_out_ti:1; 152 u8 sync_out_after_pci:1;
147 u8 no_sync_out_pci:1; 153 u8:3;
148 u8:2;
149} __attribute__ ((packed)); 154} __attribute__ ((packed));
150 155
151struct chsc_ssqd_area { 156struct chsc_ssqd_area {
@@ -202,6 +207,7 @@ struct qdio_dev_perf_stat {
202 unsigned int inbound_queue_full; 207 unsigned int inbound_queue_full;
203 unsigned int outbound_call; 208 unsigned int outbound_call;
204 unsigned int outbound_handler; 209 unsigned int outbound_handler;
210 unsigned int outbound_queue_full;
205 unsigned int fast_requeue; 211 unsigned int fast_requeue;
206 unsigned int target_full; 212 unsigned int target_full;
207 unsigned int eqbs; 213 unsigned int eqbs;
@@ -245,10 +251,10 @@ struct qdio_input_q {
245struct qdio_output_q { 251struct qdio_output_q {
246 /* PCIs are enabled for the queue */ 252 /* PCIs are enabled for the queue */
247 int pci_out_enabled; 253 int pci_out_enabled;
248 /* IQDIO: output multiple buffers (enhanced SIGA) */
249 int use_enh_siga;
250 /* timer to check for more outbound work */ 254 /* timer to check for more outbound work */
251 struct timer_list timer; 255 struct timer_list timer;
256 /* used SBALs before tasklet schedule */
257 int scan_threshold;
252}; 258};
253 259
254/* 260/*
@@ -383,12 +389,13 @@ static inline int multicast_outbound(struct qdio_q *q)
383 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) 389 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
384#define is_qebsm(q) (q->irq_ptr->sch_token != 0) 390#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
385 391
386#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
387#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
388#define need_siga_in(q) (q->irq_ptr->siga_flag.input) 392#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
389#define need_siga_out(q) (q->irq_ptr->siga_flag.output) 393#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
390#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync) 394#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
391#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci) 395#define need_siga_sync_after_ai(q) \
396 (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
397#define need_siga_sync_out_after_pci(q) \
398 (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
392 399
393#define for_each_input_queue(irq_ptr, q, i) \ 400#define for_each_input_queue(irq_ptr, q, i) \
394 for (i = 0, q = irq_ptr->input_qs[0]; \ 401 for (i = 0, q = irq_ptr->input_qs[0]; \
@@ -423,9 +430,9 @@ struct indicator_t {
423 430
424extern struct indicator_t *q_indicators; 431extern struct indicator_t *q_indicators;
425 432
426static inline int shared_ind(struct qdio_irq *irq_ptr) 433static inline int shared_ind(u32 *dsci)
427{ 434{
428 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; 435 return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
429} 436}
430 437
431/* prototypes for thin interrupt */ 438/* prototypes for thin interrupt */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 28868e7471a5..f8b03a636e49 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -151,6 +151,7 @@ static char *qperf_names[] = {
151 "Inbound queue full", 151 "Inbound queue full",
152 "Outbound calls", 152 "Outbound calls",
153 "Outbound handler", 153 "Outbound handler",
154 "Outbound queue full",
154 "Outbound fast_requeue", 155 "Outbound fast_requeue",
155 "Outbound target_full", 156 "Outbound target_full",
156 "QEBSM eqbs", 157 "QEBSM eqbs",
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5fcfa7f9e9ef..e9fff2b9bce2 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
17#include <asm/atomic.h> 18#include <asm/atomic.h>
18#include <asm/debug.h> 19#include <asm/debug.h>
19#include <asm/qdio.h> 20#include <asm/qdio.h>
@@ -29,11 +30,12 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
29MODULE_DESCRIPTION("QDIO base support"); 30MODULE_DESCRIPTION("QDIO base support");
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
32static inline int do_siga_sync(struct subchannel_id schid, 33static inline int do_siga_sync(unsigned long schid,
33 unsigned int out_mask, unsigned int in_mask) 34 unsigned int out_mask, unsigned int in_mask,
35 unsigned int fc)
34{ 36{
35 register unsigned long __fc asm ("0") = 2; 37 register unsigned long __fc asm ("0") = fc;
36 register struct subchannel_id __schid asm ("1") = schid; 38 register unsigned long __schid asm ("1") = schid;
37 register unsigned long out asm ("2") = out_mask; 39 register unsigned long out asm ("2") = out_mask;
38 register unsigned long in asm ("3") = in_mask; 40 register unsigned long in asm ("3") = in_mask;
39 int cc; 41 int cc;
@@ -47,10 +49,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
47 return cc; 49 return cc;
48} 50}
49 51
50static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) 52static inline int do_siga_input(unsigned long schid, unsigned int mask,
53 unsigned int fc)
51{ 54{
52 register unsigned long __fc asm ("0") = 1; 55 register unsigned long __fc asm ("0") = fc;
53 register struct subchannel_id __schid asm ("1") = schid; 56 register unsigned long __schid asm ("1") = schid;
54 register unsigned long __mask asm ("2") = mask; 57 register unsigned long __mask asm ("2") = mask;
55 int cc; 58 int cc;
56 59
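All three SIGA wrappers now derive the function code the same way: start from the base QDIO_SIGA_* code and, for QEBSM subchannels, substitute the subchannel token and or-in QDIO_SIGA_QEBSM_FLAG. A hedged sketch of that composition follows; the constants are copied from the qdio.h hunk above, while the schid and token values are fabricated.

    /* Hedged sketch: fc/schid selection shared by qdio_siga_sync(),
     * qdio_siga_output() and qdio_siga_input(). Values are examples. */
    #include <stdio.h>

    #define QDIO_SIGA_WRITE      0x00
    #define QDIO_SIGA_READ       0x01
    #define QDIO_SIGA_SYNC       0x02
    #define QDIO_SIGA_QEBSM_FLAG 0x80

    int main(void)
    {
        unsigned long schid = 0x00010003UL;  /* *(u32 *)&irq_ptr->schid */
        unsigned long sch_token = 0x4711UL;  /* fabricated QEBSM token */
        int is_qebsm = 1;
        unsigned int fc = QDIO_SIGA_SYNC;    /* or _READ / _WRITE */

        if (is_qebsm) {
            schid = sch_token;
            fc |= QDIO_SIGA_QEBSM_FLAG;
        }
        printf("siga fc=%#x schid=%#lx\n", fc, schid);
        return 0;
    }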
@@ -279,16 +282,20 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
279static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, 282static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
280 unsigned int input) 283 unsigned int input)
281{ 284{
285 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
286 unsigned int fc = QDIO_SIGA_SYNC;
282 int cc; 287 int cc;
283 288
284 if (!need_siga_sync(q))
285 return 0;
286
287 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 289 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
288 qperf_inc(q, siga_sync); 290 qperf_inc(q, siga_sync);
289 291
290 cc = do_siga_sync(q->irq_ptr->schid, output, input); 292 if (is_qebsm(q)) {
291 if (cc) 293 schid = q->irq_ptr->sch_token;
294 fc |= QDIO_SIGA_QEBSM_FLAG;
295 }
296
297 cc = do_siga_sync(schid, output, input, fc);
298 if (unlikely(cc))
292 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); 299 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
293 return cc; 300 return cc;
294} 301}
@@ -301,38 +308,22 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
301 return qdio_siga_sync(q, q->mask, 0); 308 return qdio_siga_sync(q, q->mask, 0);
302} 309}
303 310
304static inline int qdio_siga_sync_out(struct qdio_q *q)
305{
306 return qdio_siga_sync(q, ~0U, 0);
307}
308
309static inline int qdio_siga_sync_all(struct qdio_q *q)
310{
311 return qdio_siga_sync(q, ~0U, ~0U);
312}
313
314static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) 311static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315{ 312{
316 unsigned long schid; 313 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
317 unsigned int fc = 0; 314 unsigned int fc = QDIO_SIGA_WRITE;
318 u64 start_time = 0; 315 u64 start_time = 0;
319 int cc; 316 int cc;
320 317
321 if (q->u.out.use_enh_siga)
322 fc = 3;
323
324 if (is_qebsm(q)) { 318 if (is_qebsm(q)) {
325 schid = q->irq_ptr->sch_token; 319 schid = q->irq_ptr->sch_token;
326 fc |= 0x80; 320 fc |= QDIO_SIGA_QEBSM_FLAG;
327 } 321 }
328 else
329 schid = *((u32 *)&q->irq_ptr->schid);
330
331again: 322again:
332 cc = do_siga_output(schid, q->mask, busy_bit, fc); 323 cc = do_siga_output(schid, q->mask, busy_bit, fc);
333 324
334 /* hipersocket busy condition */ 325 /* hipersocket busy condition */
335 if (*busy_bit) { 326 if (unlikely(*busy_bit)) {
336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); 327 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
337 328
338 if (!start_time) { 329 if (!start_time) {
@@ -347,32 +338,41 @@ again:
347 338
348static inline int qdio_siga_input(struct qdio_q *q) 339static inline int qdio_siga_input(struct qdio_q *q)
349{ 340{
341 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
342 unsigned int fc = QDIO_SIGA_READ;
350 int cc; 343 int cc;
351 344
352 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 345 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
353 qperf_inc(q, siga_read); 346 qperf_inc(q, siga_read);
354 347
355 cc = do_siga_input(q->irq_ptr->schid, q->mask); 348 if (is_qebsm(q)) {
356 if (cc) 349 schid = q->irq_ptr->sch_token;
350 fc |= QDIO_SIGA_QEBSM_FLAG;
351 }
352
353 cc = do_siga_input(schid, q->mask, fc);
354 if (unlikely(cc))
357 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); 355 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
358 return cc; 356 return cc;
359} 357}
360 358
361static inline void qdio_sync_after_thinint(struct qdio_q *q) 359#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
360#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
361
362static inline void qdio_sync_queues(struct qdio_q *q)
362{ 363{
363 if (pci_out_supported(q)) { 364 /* PCI capable outbound queues will also be scanned, so sync them too */
364 if (need_siga_sync_thinint(q)) 365 if (pci_out_supported(q))
365 qdio_siga_sync_all(q); 366 qdio_siga_sync_all(q);
366 else if (need_siga_sync_out_thinint(q)) 367 else
367 qdio_siga_sync_out(q);
368 } else
369 qdio_siga_sync_q(q); 368 qdio_siga_sync_q(q);
370} 369}
371 370
372int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, 371int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
373 unsigned char *state) 372 unsigned char *state)
374{ 373{
375 qdio_siga_sync_q(q); 374 if (need_siga_sync(q))
375 qdio_siga_sync_q(q);
376 return get_buf_states(q, bufnr, state, 1, 0); 376 return get_buf_states(q, bufnr, state, 1, 0);
377} 377}
378 378
@@ -549,7 +549,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
549 if (!atomic_read(&q->nr_buf_used)) 549 if (!atomic_read(&q->nr_buf_used))
550 return 1; 550 return 1;
551 551
552 qdio_siga_sync_q(q); 552 if (need_siga_sync(q))
553 qdio_siga_sync_q(q);
553 get_buf_state(q, q->first_to_check, &state, 0); 554 get_buf_state(q, q->first_to_check, &state, 0);
554 555
555 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) 556 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -644,9 +645,12 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
644 int count, stop; 645 int count, stop;
645 unsigned char state; 646 unsigned char state;
646 647
647 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || 648 if (need_siga_sync(q))
648 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) 649 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
649 qdio_siga_sync_q(q); 650 !pci_out_supported(q)) ||
651 (queue_type(q) == QDIO_IQDIO_QFMT &&
652 multicast_outbound(q)))
653 qdio_siga_sync_q(q);
650 654
651 /* 655 /*
652 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 656 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -818,7 +822,8 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
818static void __tiqdio_inbound_processing(struct qdio_q *q) 822static void __tiqdio_inbound_processing(struct qdio_q *q)
819{ 823{
820 qperf_inc(q, tasklet_inbound); 824 qperf_inc(q, tasklet_inbound);
821 qdio_sync_after_thinint(q); 825 if (need_siga_sync(q) && need_siga_sync_after_ai(q))
826 qdio_sync_queues(q);
822 827
823 /* 828 /*
824 * The interrupt could be caused by a PCI request. Check the 829 * The interrupt could be caused by a PCI request. Check the
@@ -898,16 +903,14 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
898 tasklet_schedule(&q->tasklet); 903 tasklet_schedule(&q->tasklet);
899 } 904 }
900 905
901 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 906 if (!pci_out_supported(q))
902 return; 907 return;
903 908
904 for_each_output_queue(irq_ptr, q, i) { 909 for_each_output_queue(irq_ptr, q, i) {
905 if (qdio_outbound_q_done(q)) 910 if (qdio_outbound_q_done(q))
906 continue; 911 continue;
907 912 if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
908 if (!siga_syncs_out_pci(q))
909 qdio_siga_sync_q(q); 913 qdio_siga_sync_q(q);
910
911 tasklet_schedule(&q->tasklet); 914 tasklet_schedule(&q->tasklet);
912 } 915 }
913} 916}
@@ -970,6 +973,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
970 return; 973 return;
971 } 974 }
972 975
976 kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
973 if (irq_ptr->perf_stat_enabled) 977 if (irq_ptr->perf_stat_enabled)
974 irq_ptr->perf_stat.qdio_int++; 978 irq_ptr->perf_stat.qdio_int++;
975 979
@@ -1273,7 +1277,6 @@ int qdio_establish(struct qdio_initialize *init_data)
1273 } 1277 }
1274 1278
1275 qdio_setup_ssqd_info(irq_ptr); 1279 qdio_setup_ssqd_info(irq_ptr);
1276 DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
1277 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); 1280 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
1278 1281
1279 /* qebsm is now setup if available, initialize buffer states */ 1282 /* qebsm is now setup if available, initialize buffer states */
@@ -1445,52 +1448,38 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1445 used = atomic_add_return(count, &q->nr_buf_used); 1448 used = atomic_add_return(count, &q->nr_buf_used);
1446 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); 1449 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1447 1450
1451 if (used == QDIO_MAX_BUFFERS_PER_Q)
1452 qperf_inc(q, outbound_queue_full);
1453
1448 if (callflags & QDIO_FLAG_PCI_OUT) { 1454 if (callflags & QDIO_FLAG_PCI_OUT) {
1449 q->u.out.pci_out_enabled = 1; 1455 q->u.out.pci_out_enabled = 1;
1450 qperf_inc(q, pci_request_int); 1456 qperf_inc(q, pci_request_int);
1451 } 1457 } else
1452 else
1453 q->u.out.pci_out_enabled = 0; 1458 q->u.out.pci_out_enabled = 0;
1454 1459
1455 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1460 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1456 if (multicast_outbound(q)) 1461 /* One SIGA-W per buffer required for unicast HiperSockets. */
1462 WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1463
1464 rc = qdio_kick_outbound_q(q);
1465 } else if (need_siga_sync(q)) {
1466 rc = qdio_siga_sync_q(q);
1467 } else {
1468 /* try to fast requeue buffers */
1469 get_buf_state(q, prev_buf(bufnr), &state, 0);
1470 if (state != SLSB_CU_OUTPUT_PRIMED)
1457 rc = qdio_kick_outbound_q(q); 1471 rc = qdio_kick_outbound_q(q);
1458 else 1472 else
1459 if ((q->irq_ptr->ssqd_desc.mmwc > 1) && 1473 qperf_inc(q, fast_requeue);
1460 (count > 1) &&
1461 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
1462 /* exploit enhanced SIGA */
1463 q->u.out.use_enh_siga = 1;
1464 rc = qdio_kick_outbound_q(q);
1465 } else {
1466 /*
1467 * One siga-w per buffer required for unicast
1468 * HiperSockets.
1469 */
1470 q->u.out.use_enh_siga = 0;
1471 while (count--) {
1472 rc = qdio_kick_outbound_q(q);
1473 if (rc)
1474 goto out;
1475 }
1476 }
1477 goto out;
1478 }
1479
1480 if (need_siga_sync(q)) {
1481 qdio_siga_sync_q(q);
1482 goto out;
1483 } 1474 }
1484 1475
1485 /* try to fast requeue buffers */ 1476 /* in case of SIGA errors we must process the error immediately */
1486 get_buf_state(q, prev_buf(bufnr), &state, 0); 1477 if (used >= q->u.out.scan_threshold || rc)
1487 if (state != SLSB_CU_OUTPUT_PRIMED) 1478 tasklet_schedule(&q->tasklet);
1488 rc = qdio_kick_outbound_q(q);
1489 else 1479 else
1490 qperf_inc(q, fast_requeue); 1480 /* free the SBALs in case of no further traffic */
1491 1481 if (!timer_pending(&q->u.out.timer))
1492out: 1482 mod_timer(&q->u.out.timer, jiffies + HZ);
1493 tasklet_schedule(&q->tasklet);
1494 return rc; 1483 return rc;
1495} 1484}
1496 1485
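The rewritten tail of handle_outbound() stops scheduling the tasklet unconditionally: the queue is scanned right away only when the used SBAL count reaches scan_threshold or a SIGA error must be handled; otherwise a one-second timer reaps the completed buffers. A hedged sketch of just that decision, with the threshold and return codes invented:

    /* Hedged sketch of the scan decision at the end of handle_outbound().
     * The threshold, rc values and timer are simulated. */
    #include <stdio.h>

    static void decide(int used, int threshold, int rc, int timer_pending)
    {
        if (used >= threshold || rc)
            printf("used=%2d rc=%2d -> tasklet_schedule()\n", used, rc);
        else if (!timer_pending)
            printf("used=%2d rc=%2d -> mod_timer(jiffies + HZ)\n", used, rc);
        else
            printf("used=%2d rc=%2d -> leave armed timer alone\n", used, rc);
    }

    int main(void)
    {
        decide( 8, 32,  0, 0); /* light traffic: timer frees the SBALs */
        decide( 8, 32,  0, 1); /* timer already pending: do nothing */
        decide(48, 32,  0, 0); /* threshold reached: scan immediately */
        decide( 8, 32, -2, 0); /* SIGA error: must process right away */
        return 0;
    }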
@@ -1550,7 +1539,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
1550 1539
1551 WARN_ON(queue_irqs_enabled(q)); 1540 WARN_ON(queue_irqs_enabled(q));
1552 1541
1553 if (!shared_ind(q->irq_ptr)) 1542 if (!shared_ind(q->irq_ptr->dsci))
1554 xchg(q->irq_ptr->dsci, 0); 1543 xchg(q->irq_ptr->dsci, 0);
1555 1544
1556 qdio_stop_polling(q); 1545 qdio_stop_polling(q);
@@ -1560,7 +1549,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
1560 * We need to check again to not lose initiative after 1549 * We need to check again to not lose initiative after
1561 * resetting the ACK state. 1550 * resetting the ACK state.
1562 */ 1551 */
1563 if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci) 1552 if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
1564 goto rescan; 1553 goto rescan;
1565 if (!qdio_inbound_q_done(q)) 1554 if (!qdio_inbound_q_done(q))
1566 goto rescan; 1555 goto rescan;
@@ -1600,12 +1589,14 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1600 q = irq_ptr->input_qs[nr]; 1589 q = irq_ptr->input_qs[nr];
1601 WARN_ON(queue_irqs_enabled(q)); 1590 WARN_ON(queue_irqs_enabled(q));
1602 1591
1603 qdio_sync_after_thinint(q);
1604
1605 /* 1592 /*
1606 * The interrupt could be caused by a PCI request. Check the 1593 * Cannot rely on automatic sync after interrupt since queues may
1607 * PCI capable outbound queues. 1594 * also be examined without interrupt.
1608 */ 1595 */
1596 if (need_siga_sync(q))
1597 qdio_sync_queues(q);
1598
1599 /* check the PCI capable outbound queues. */
1609 qdio_check_outbound_after_thinint(q); 1600 qdio_check_outbound_after_thinint(q);
1610 1601
1611 if (!qdio_inbound_q_moved(q)) 1602 if (!qdio_inbound_q_moved(q))
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index a13cf7ec64b2..89107d0938c4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -178,6 +178,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
178 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); 178 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
179 179
180 q->is_input_q = 0; 180 q->is_input_q = 0;
181 q->u.out.scan_threshold = qdio_init->scan_threshold;
181 setup_storage_lists(q, irq_ptr, output_sbal_array, i); 182 setup_storage_lists(q, irq_ptr, output_sbal_array, i);
182 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; 183 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
183 184
@@ -196,14 +197,10 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
196 irq_ptr->siga_flag.output = 1; 197 irq_ptr->siga_flag.output = 1;
197 if (qdioac & AC1_SIGA_SYNC_NEEDED) 198 if (qdioac & AC1_SIGA_SYNC_NEEDED)
198 irq_ptr->siga_flag.sync = 1; 199 irq_ptr->siga_flag.sync = 1;
199 if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) 200 if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
200 irq_ptr->siga_flag.no_sync_ti = 1; 201 irq_ptr->siga_flag.sync_after_ai = 1;
201 if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI) 202 if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
202 irq_ptr->siga_flag.no_sync_out_pci = 1; 203 irq_ptr->siga_flag.sync_out_after_pci = 1;
203
204 if (irq_ptr->siga_flag.no_sync_out_pci &&
205 irq_ptr->siga_flag.no_sync_ti)
206 irq_ptr->siga_flag.no_sync_out_ti = 1;
207} 204}
208 205
209static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, 206static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
@@ -451,7 +448,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
451 char s[80]; 448 char s[80];
452 449
453 snprintf(s, 80, "qdio: %s %s on SC %x using " 450 snprintf(s, 80, "qdio: %s %s on SC %x using "
454 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", 451 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
455 dev_name(&cdev->dev), 452 dev_name(&cdev->dev),
456 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : 453 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
457 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), 454 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
@@ -463,9 +460,8 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
463 (irq_ptr->siga_flag.input) ? "R" : " ", 460 (irq_ptr->siga_flag.input) ? "R" : " ",
464 (irq_ptr->siga_flag.output) ? "W" : " ", 461 (irq_ptr->siga_flag.output) ? "W" : " ",
465 (irq_ptr->siga_flag.sync) ? "S" : " ", 462 (irq_ptr->siga_flag.sync) ? "S" : " ",
466 (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", 463 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
467 (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", 464 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
468 (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
469 printk(KERN_INFO "%s", s); 465 printk(KERN_INFO "%s", s);
470} 466}
471 467
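The inversion in process_ac_flags() deserves a second look: the qdioac bits say when the adapter syncs automatically, while the new siga_flag bits record when the driver itself must issue SIGA-S, so each flag is simply the negation of the corresponding AC1_* bit. A hedged truth table follows; the AC1_* bit positions are stand-ins, not the real definitions.

    /* Hedged sketch of the qdioac -> siga_flag negation in
     * process_ac_flags(). The AC1_* values are stand-ins. */
    #include <stdio.h>

    #define AC1_AUTOMATIC_SYNC_ON_THININT 0x08
    #define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04

    int main(void)
    {
        unsigned int qdioac;

        for (qdioac = 0; qdioac <= 0x0c; qdioac += 0x04) {
            int sync_after_ai = !(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT);
            int sync_out_after_pci = !(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI);

            printf("qdioac=%#04x sync_after_ai=%d sync_out_after_pci=%d\n",
                   qdioac, sync_after_ai, sync_out_after_pci);
        }
        return 0;
    }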
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 5d9c66627b6e..5c4e741d8221 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/kernel_stat.h>
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12#include <asm/debug.h> 13#include <asm/debug.h>
13#include <asm/qdio.h> 14#include <asm/qdio.h>
@@ -35,22 +36,8 @@ static u8 *tiqdio_alsi;
35 36
36struct indicator_t *q_indicators; 37struct indicator_t *q_indicators;
37 38
38static int css_qdio_omit_svs;
39
40static u64 last_ai_time; 39static u64 last_ai_time;
41 40
42static inline unsigned long do_clear_global_summary(void)
43{
44 register unsigned long __fn asm("1") = 3;
45 register unsigned long __tmp asm("2");
46 register unsigned long __time asm("3");
47
48 asm volatile(
49 " .insn rre,0xb2650000,2,0"
50 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
51 return __time;
52}
53
54/* returns addr for the device state change indicator */ 41/* returns addr for the device state change indicator */
55static u32 *get_indicator(void) 42static u32 *get_indicator(void)
56{ 43{
@@ -83,10 +70,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
83 struct qdio_q *q; 70 struct qdio_q *q;
84 int i; 71 int i;
85 72
86 /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
87 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
88 css_qdio_omit_svs = 1;
89
90 mutex_lock(&tiq_list_lock); 73 mutex_lock(&tiq_list_lock);
91 for_each_input_queue(irq_ptr, q, i) 74 for_each_input_queue(irq_ptr, q, i)
92 list_add_rcu(&q->entry, &tiq_list); 75 list_add_rcu(&q->entry, &tiq_list);
@@ -112,9 +95,9 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
112 } 95 }
113} 96}
114 97
115static inline int shared_ind_used(void) 98static inline u32 shared_ind_set(void)
116{ 99{
117 return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count); 100 return q_indicators[TIQDIO_SHARED_IND].ind;
118} 101}
119 102
120/** 103/**
@@ -124,20 +107,11 @@ static inline int shared_ind_used(void)
124 */ 107 */
125static void tiqdio_thinint_handler(void *alsi, void *data) 108static void tiqdio_thinint_handler(void *alsi, void *data)
126{ 109{
110 u32 si_used = shared_ind_set();
127 struct qdio_q *q; 111 struct qdio_q *q;
128 112
129 last_ai_time = S390_lowcore.int_clock; 113 last_ai_time = S390_lowcore.int_clock;
130 114 kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
131 /*
132 * SVS only when needed: issue SVS to benefit from iqdio interrupt
133 * avoidance (SVS clears adapter interrupt suppression overwrite).
134 */
135 if (!css_qdio_omit_svs)
136 do_clear_global_summary();
137
138 /* reset local summary indicator */
139 if (shared_ind_used())
140 xchg(tiqdio_alsi, 0);
141 115
142 /* protect tiq_list entries, only changed in activate or shutdown */ 116 /* protect tiq_list entries, only changed in activate or shutdown */
143 rcu_read_lock(); 117 rcu_read_lock();
@@ -146,7 +120,10 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
146 list_for_each_entry_rcu(q, &tiq_list, entry) { 120 list_for_each_entry_rcu(q, &tiq_list, entry) {
147 121
148 /* only process queues from changed sets */ 122 /* only process queues from changed sets */
149 if (!*q->irq_ptr->dsci) 123 if (unlikely(shared_ind(q->irq_ptr->dsci))) {
124 if (!si_used)
125 continue;
126 } else if (!*q->irq_ptr->dsci)
150 continue; 127 continue;
151 128
152 if (q->u.in.queue_start_poll) { 129 if (q->u.in.queue_start_poll) {
@@ -162,7 +139,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
162 q->irq_ptr->int_parm); 139 q->irq_ptr->int_parm);
163 } else { 140 } else {
164 /* only clear it if the indicator is non-shared */ 141 /* only clear it if the indicator is non-shared */
165 if (!shared_ind(q->irq_ptr)) 142 if (!shared_ind(q->irq_ptr->dsci))
166 xchg(q->irq_ptr->dsci, 0); 143 xchg(q->irq_ptr->dsci, 0);
167 /* 144 /*
168 * Call inbound processing but not directly 145 * Call inbound processing but not directly
@@ -178,13 +155,8 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
178 * If the shared indicator was used clear it now after all queues 155 * If the shared indicator was used clear it now after all queues
179 * were processed. 156 * were processed.
180 */ 157 */
181 if (shared_ind_used()) { 158 if (si_used && shared_ind_set())
182 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); 159 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
183
184 /* prevent racing */
185 if (*tiqdio_alsi)
186 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
187 }
188} 160}
189 161
190static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) 162static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -269,12 +241,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
269{ 241{
270 if (!is_thinint_irq(irq_ptr)) 242 if (!is_thinint_irq(irq_ptr))
271 return 0; 243 return 0;
272
273 /* Check for aif time delay disablement. If installed,
274 * omit SVS even under LPAR
275 */
276 if (css_general_characteristics.aif_tdd)
277 css_qdio_omit_svs = 1;
278 return set_subchannel_ind(irq_ptr, 0); 244 return set_subchannel_ind(irq_ptr, 0);
279} 245}
280 246
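The reworked thin-interrupt handler samples the shared indicator once, before the RCU walk, and then uses the per-queue dsci pointer to decide what to process: queues on the shared indicator run only if that snapshot was set, private queues only if their own dsci is set, and the shared indicator is cleared once at the end instead of per queue. A hedged dispatch sketch, with the queue layout and indicator values invented:

    /* Hedged sketch of the shared-vs-private dsci dispatch in
     * tiqdio_thinint_handler(). Queue data is invented. */
    #include <stdio.h>

    struct q { const char *name; unsigned int *dsci; int shared; };

    int main(void)
    {
        unsigned int shared_ind = 1, priv_a = 0, priv_b = 1;
        struct q qs[] = {
            { "qA (private)", &priv_a,     0 },
            { "qB (private)", &priv_b,     0 },
            { "qC (shared)",  &shared_ind, 1 },
        };
        unsigned int si_used = shared_ind; /* snapshot before the walk */
        int i;

        for (i = 0; i < 3; i++) {
            if (qs[i].shared ? !si_used : !*qs[i].dsci)
                continue;                  /* nothing pending in this set */
            printf("process %s\n", qs[i].name);
        }
        if (si_used && shared_ind)
            shared_ind = 0;                /* cleared once, after all queues */
        return 0;
    }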
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8fd8c62455e9..67302b944ab3 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -27,6 +27,7 @@
27#define KMSG_COMPONENT "ap" 27#define KMSG_COMPONENT "ap"
28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 29
30#include <linux/kernel_stat.h>
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
@@ -154,7 +155,7 @@ static inline int ap_instructions_available(void)
154 */ 155 */
155static int ap_interrupts_available(void) 156static int ap_interrupts_available(void)
156{ 157{
157 return test_facility(1) && test_facility(2); 158 return test_facility(2) && test_facility(65);
158} 159}
159 160
160/** 161/**
@@ -221,6 +222,69 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
221} 222}
222#endif 223#endif
223 224
225static inline struct ap_queue_status __ap_4096_commands_available(ap_qid_t qid,
226 int *support)
227{
228 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
229 register struct ap_queue_status reg1 asm ("1");
230 register unsigned long reg2 asm ("2") = 0UL;
231
232 asm volatile(
233 ".long 0xb2af0000\n"
234 "0: la %1,0\n"
235 "1:\n"
236 EX_TABLE(0b, 1b)
237 : "+d" (reg0), "=d" (reg1), "=d" (reg2)
238 :
239 : "cc");
240
241 if (reg2 & 0x6000000000000000ULL)
242 *support = 1;
243 else
244 *support = 0;
245
246 return reg1;
247}
248
249/**
250 * ap_4096_commands_available(): Check for availability of 4096 bit RSA
251 * support.
252 * @qid: The AP queue number
253 *
254 * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
255 */
256int ap_4096_commands_available(ap_qid_t qid)
257{
258 struct ap_queue_status status;
259 int i, support = 0;
260 status = __ap_4096_commands_available(qid, &support);
261
262 for (i = 0; i < AP_MAX_RESET; i++) {
263 switch (status.response_code) {
264 case AP_RESPONSE_NORMAL:
265 return support;
266 case AP_RESPONSE_RESET_IN_PROGRESS:
267 case AP_RESPONSE_BUSY:
268 break;
269 case AP_RESPONSE_Q_NOT_AVAIL:
270 case AP_RESPONSE_DECONFIGURED:
271 case AP_RESPONSE_CHECKSTOPPED:
272 case AP_RESPONSE_INVALID_ADDRESS:
273 return 0;
274 case AP_RESPONSE_OTHERWISE_CHANGED:
275 break;
276 default:
277 break;
278 }
279 if (i < AP_MAX_RESET - 1) {
280 udelay(5);
281 status = __ap_4096_commands_available(qid, &support);
282 }
283 }
284 return support;
285}
286EXPORT_SYMBOL(ap_4096_commands_available);
287
224/** 288/**
225 * ap_queue_enable_interruption(): Enable interruption on an AP. 289 * ap_queue_enable_interruption(): Enable interruption on an AP.
226 * @qid: The AP queue number 290 * @qid: The AP queue number
@@ -1042,6 +1106,7 @@ out:
1042 1106
1043static void ap_interrupt_handler(void *unused1, void *unused2) 1107static void ap_interrupt_handler(void *unused1, void *unused2)
1044{ 1108{
1109 kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
1045 tasklet_schedule(&ap_tasklet); 1110 tasklet_schedule(&ap_tasklet);
1046} 1111}
1047 1112
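ap_4096_commands_available() wraps the probe in the AP bus's usual retry discipline: transient response codes (reset in progress, busy, otherwise changed) are retried up to AP_MAX_RESET times with a short delay, while hard errors report no support immediately. A hedged skeleton of that loop follows; the response codes, the limit and the probe itself are stand-ins for the real definitions.

    /* Hedged sketch of the retry loop around the 4096-bit capability
     * probe. Codes, AP_MAX_RESET and the probe are stand-ins. */
    #include <stdio.h>

    enum { RESP_NORMAL, RESP_BUSY, RESP_DECONFIGURED };
    #define AP_MAX_RESET 5

    static int probe(int attempt, int *support)
    {
        *support = 1;
        return attempt < 2 ? RESP_BUSY : RESP_NORMAL; /* busy twice, then ok */
    }

    int main(void)
    {
        int i, support = 0;
        int rc = probe(0, &support);

        for (i = 0; i < AP_MAX_RESET; i++) {
            if (rc == RESP_NORMAL)
                break;                /* answer is valid: use 'support' */
            if (rc == RESP_DECONFIGURED) {
                support = 0;          /* hard error: no 4096-bit RSA */
                break;
            }
            if (i < AP_MAX_RESET - 1)
                rc = probe(i + 1, &support); /* kernel udelay(5)s here */
        }
        printf("4096-bit RSA support: %d\n", support);
        return 0;
    }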
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4785d07cd447..08b9738285b4 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -196,4 +196,6 @@ void ap_flush_queue(struct ap_device *ap_dev);
196int ap_module_init(void); 196int ap_module_init(void);
197void ap_module_exit(void); 197void ap_module_exit(void);
198 198
199int ap_4096_commands_available(ap_qid_t qid);
200
199#endif /* _AP_BUS_H_ */ 201#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 7fca9c10ffcf..8e65447f76b7 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -396,8 +396,15 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
396 if (copied == 0) { 396 if (copied == 0) {
397 unsigned int len; 397 unsigned int len;
398 spin_unlock_bh(&zcrypt_device_lock); 398 spin_unlock_bh(&zcrypt_device_lock);
399 /* len is max 256 / 2 - 120 = 8 */ 399 /* len is max 256 / 2 - 120 = 8
400 len = crt->inputdatalength / 2 - 120; 400 * For bigger devices just assume len of leading
401 * 0s is 8 as stated in the requirements for
402 * ica_rsa_modexpo_crt struct in zcrypt.h.
403 */
404 if (crt->inputdatalength <= 256)
405 len = crt->inputdatalength / 2 - 120;
406 else
407 len = 8;
401 if (len > sizeof(z1)) 408 if (len > sizeof(z1))
402 return -EFAULT; 409 return -EFAULT;
403 z1 = z2 = z3 = 0; 410 z1 = z2 = z3 = 0;
@@ -405,6 +412,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
405 copy_from_user(&z2, crt->bp_key, len) || 412 copy_from_user(&z2, crt->bp_key, len) ||
406 copy_from_user(&z3, crt->u_mult_inv, len)) 413 copy_from_user(&z3, crt->u_mult_inv, len))
407 return -EFAULT; 414 return -EFAULT;
415 z1 = z2 = z3 = 0;
408 copied = 1; 416 copied = 1;
409 /* 417 /*
410 * We have to restart device lookup - 418 * We have to restart device lookup -
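The length rule above means the leading-zero check scales with the input only up to 2048-bit keys; past 256 bytes it is pinned at 8, matching the ica_rsa_modexpo_crt contract cited in the comment. A hedged illustration of the two branches, with example input lengths:

    /* Hedged sketch of the leading-zero length rule in zcrypt_rsa_crt().
     * Input lengths are example values. */
    #include <stdio.h>

    static unsigned int zero_len(unsigned int inputdatalength)
    {
        if (inputdatalength <= 256)
            return inputdatalength / 2 - 120;
        return 8; /* fixed for the 4096-bit case */
    }

    int main(void)
    {
        printf("242-byte input: len=%u\n", zero_len(242)); /* 1 */
        printf("256-byte input: len=%u\n", zero_len(256)); /* 8 */
        printf("512-byte input: len=%u\n", zero_len(512)); /* 8 */
        return 0;
    }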
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 8e7ffbf2466c..88ebd114735b 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -109,6 +109,7 @@ struct zcrypt_device {
109 int request_count; /* # current requests. */ 109 int request_count; /* # current requests. */
110 110
111 struct ap_message reply; /* Per-device reply structure. */ 111 struct ap_message reply; /* Per-device reply structure. */
112 int max_exp_bit_length;
112}; 113};
113 114
114struct zcrypt_device *zcrypt_device_alloc(size_t); 115struct zcrypt_device *zcrypt_device_alloc(size_t);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 9c409efa1ecf..2176d00b395e 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -41,7 +41,7 @@
41#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */ 41#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
42#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */ 42#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
43#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE 43#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
44#define CEX3A_MAX_MOD_SIZE CEX2A_MAX_MOD_SIZE 44#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
45 45
46#define CEX2A_SPEED_RATING 970 46#define CEX2A_SPEED_RATING 970
47#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */ 47#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */
@@ -49,8 +49,10 @@
49#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ 49#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
50#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ 50#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
51 51
52#define CEX3A_MAX_MESSAGE_SIZE CEX2A_MAX_MESSAGE_SIZE 52#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus
53#define CEX3A_MAX_RESPONSE_SIZE CEX2A_MAX_RESPONSE_SIZE 53 * (max outputdatalength) +
54 * type80_hdr*/
55#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg)
54 56
55#define CEX2A_CLEANUP_TIME (15*HZ) 57#define CEX2A_CLEANUP_TIME (15*HZ)
56#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 58#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
@@ -110,7 +112,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
110 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len; 112 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
111 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len; 113 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
112 inp = meb1->message + sizeof(meb1->message) - mod_len; 114 inp = meb1->message + sizeof(meb1->message) - mod_len;
113 } else { 115 } else if (mod_len <= 256) {
114 struct type50_meb2_msg *meb2 = ap_msg->message; 116 struct type50_meb2_msg *meb2 = ap_msg->message;
115 memset(meb2, 0, sizeof(*meb2)); 117 memset(meb2, 0, sizeof(*meb2));
116 ap_msg->length = sizeof(*meb2); 118 ap_msg->length = sizeof(*meb2);
@@ -120,6 +122,17 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
120 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len; 122 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
121 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len; 123 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
122 inp = meb2->message + sizeof(meb2->message) - mod_len; 124 inp = meb2->message + sizeof(meb2->message) - mod_len;
125 } else {
126 /* mod_len > 256 bytes, i.e. a 4096 bit RSA key */
127 struct type50_meb3_msg *meb3 = ap_msg->message;
128 memset(meb3, 0, sizeof(*meb3));
129 ap_msg->length = sizeof(*meb3);
130 meb3->header.msg_type_code = TYPE50_TYPE_CODE;
131 meb3->header.msg_len = sizeof(*meb3);
132 meb3->keyblock_type = TYPE50_MEB3_FMT;
133 mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
134 exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
135 inp = meb3->message + sizeof(meb3->message) - mod_len;
123 } 136 }
124 137
125 if (copy_from_user(mod, mex->n_modulus, mod_len) || 138 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
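
The pointer arithmetic in this hunk right-aligns each big-endian operand inside its fixed 512-byte field, relying on the preceding memset() for the zero pad. A standalone sketch of the idiom, with names assumed rather than copied from the patch:

    unsigned char modulus[512];                    /* fixed-size field  */
    unsigned char *mod;

    memset(modulus, 0, sizeof(modulus));           /* zero the pad      */
    mod = modulus + sizeof(modulus) - mod_len;     /* right-align start */
    if (copy_from_user(mod, mex->n_modulus, mod_len))
        return -EFAULT;           /* value is now right-aligned with a
                                   * zero pad of 512 - mod_len bytes    */
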
@@ -142,7 +155,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
142 struct ap_message *ap_msg, 155 struct ap_message *ap_msg,
143 struct ica_rsa_modexpo_crt *crt) 156 struct ica_rsa_modexpo_crt *crt)
144{ 157{
145 int mod_len, short_len, long_len, long_offset; 158 int mod_len, short_len, long_len, long_offset, limit;
146 unsigned char *p, *q, *dp, *dq, *u, *inp; 159 unsigned char *p, *q, *dp, *dq, *u, *inp;
147 160
148 mod_len = crt->inputdatalength; 161 mod_len = crt->inputdatalength;
@@ -152,14 +165,20 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
152 /* 165 /*
153 * CEX2A cannot handle p, dp, or U > 128 bytes. 166 * CEX2A cannot handle p, dp, or U > 128 bytes.
154 * If we have one of these, we need to do extra checking. 167 * If we have one of these, we need to do extra checking.
168 * For CEX3A the limit is 256 bytes.
155 */ 169 */
156 if (long_len > 128) { 170 if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
171 limit = 256;
172 else
173 limit = 128;
174
175 if (long_len > limit) {
157 /* 176 /*
158 * zcrypt_rsa_crt already checked for the leading 177 * zcrypt_rsa_crt already checked for the leading
159 * zeroes of np_prime, bp_key and u_mult_inv. 178 * zeroes of np_prime, bp_key and u_mult_inv.
160 */ 179 */
161 long_offset = long_len - 128; 180 long_offset = long_len - limit;
162 long_len = 128; 181 long_len = limit;
163 } else 182 } else
164 long_offset = 0; 183 long_offset = 0;
165 184
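
Worked numbers for the clamp above, assuming the usual long_len = mod_len / 2 + 8 computed earlier in this function:

    /* A 4096-bit key gives mod_len = 512, so long_len = 264 on entry. */
    limit       = 256;               /* CEX3A (zdev->max_mod_size == 512) */
    long_offset = long_len - limit;  /* 264 - 256 = 8 pad bytes to skip   */
    long_len    = limit;             /* copy only the low 256 bytes       */
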
@@ -180,7 +199,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
180 dq = crb1->dq + sizeof(crb1->dq) - short_len; 199 dq = crb1->dq + sizeof(crb1->dq) - short_len;
181 u = crb1->u + sizeof(crb1->u) - long_len; 200 u = crb1->u + sizeof(crb1->u) - long_len;
182 inp = crb1->message + sizeof(crb1->message) - mod_len; 201 inp = crb1->message + sizeof(crb1->message) - mod_len;
183 } else { 202 } else if (long_len <= 128) {
184 struct type50_crb2_msg *crb2 = ap_msg->message; 203 struct type50_crb2_msg *crb2 = ap_msg->message;
185 memset(crb2, 0, sizeof(*crb2)); 204 memset(crb2, 0, sizeof(*crb2));
186 ap_msg->length = sizeof(*crb2); 205 ap_msg->length = sizeof(*crb2);
@@ -193,6 +212,20 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
193 dq = crb2->dq + sizeof(crb2->dq) - short_len; 212 dq = crb2->dq + sizeof(crb2->dq) - short_len;
194 u = crb2->u + sizeof(crb2->u) - long_len; 213 u = crb2->u + sizeof(crb2->u) - long_len;
195 inp = crb2->message + sizeof(crb2->message) - mod_len; 214 inp = crb2->message + sizeof(crb2->message) - mod_len;
215 } else {
216 /* 128 < long_len <= 256 */
217 struct type50_crb3_msg *crb3 = ap_msg->message;
218 memset(crb3, 0, sizeof(*crb3));
219 ap_msg->length = sizeof(*crb3);
220 crb3->header.msg_type_code = TYPE50_TYPE_CODE;
221 crb3->header.msg_len = sizeof(*crb3);
222 crb3->keyblock_type = TYPE50_CRB3_FMT;
223 p = crb3->p + sizeof(crb3->p) - long_len;
224 q = crb3->q + sizeof(crb3->q) - short_len;
225 dp = crb3->dp + sizeof(crb3->dp) - long_len;
226 dq = crb3->dq + sizeof(crb3->dq) - short_len;
227 u = crb3->u + sizeof(crb3->u) - long_len;
228 inp = crb3->message + sizeof(crb3->message) - mod_len;
196 } 229 }
197 230
198 if (copy_from_user(p, crt->np_prime + long_offset, long_len) || 231 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
@@ -203,7 +236,6 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
203 copy_from_user(inp, crt->inputdata, mod_len)) 236 copy_from_user(inp, crt->inputdata, mod_len))
204 return -EFAULT; 237 return -EFAULT;
205 238
206
207 return 0; 239 return 0;
208} 240}
209 241
@@ -230,7 +262,10 @@ static int convert_type80(struct zcrypt_device *zdev,
230 zdev->online = 0; 262 zdev->online = 0;
231 return -EAGAIN; /* repeat the request on a different device. */ 263 return -EAGAIN; /* repeat the request on a different device. */
232 } 264 }
233 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); 265 if (zdev->user_space_type == ZCRYPT_CEX2A)
266 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
267 else
268 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
234 data = reply->message + t80h->len - outputdatalength; 269 data = reply->message + t80h->len - outputdatalength;
235 if (copy_to_user(outputdata, data, outputdatalength)) 270 if (copy_to_user(outputdata, data, outputdatalength))
236 return -EFAULT; 271 return -EFAULT;
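
The device-type split here recurs in zcrypt_cex2a_receive below; a sketch of the same bound as one expression (not part of the patch):

    int max_resp = zdev->user_space_type == ZCRYPT_CEX2A ?
                   CEX2A_MAX_RESPONSE_SIZE : CEX3A_MAX_RESPONSE_SIZE;
    BUG_ON(t80h->len > max_resp);
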
@@ -282,7 +317,10 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
282 } 317 }
283 t80h = reply->message; 318 t80h = reply->message;
284 if (t80h->type == TYPE80_RSP_CODE) { 319 if (t80h->type == TYPE80_RSP_CODE) {
285 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); 320 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
321 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
322 else
323 length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
286 memcpy(msg->message, reply->message, length); 324 memcpy(msg->message, reply->message, length);
287 } else 325 } else
288 memcpy(msg->message, reply->message, sizeof error_reply); 326 memcpy(msg->message, reply->message, sizeof error_reply);
@@ -307,7 +345,10 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
307 int rc; 345 int rc;
308 346
309 ap_init_message(&ap_msg); 347 ap_init_message(&ap_msg);
310 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 348 if (zdev->user_space_type == ZCRYPT_CEX2A)
349 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
350 else
351 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
311 if (!ap_msg.message) 352 if (!ap_msg.message)
312 return -ENOMEM; 353 return -ENOMEM;
313 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 354 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -345,7 +386,10 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
345 int rc; 386 int rc;
346 387
347 ap_init_message(&ap_msg); 388 ap_init_message(&ap_msg);
348 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 389 if (zdev->user_space_type == ZCRYPT_CEX2A)
390 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
391 else
392 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
349 if (!ap_msg.message) 393 if (!ap_msg.message)
350 return -ENOMEM; 394 return -ENOMEM;
351 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 395 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
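
Both zcrypt_cex2a_modexpo and zcrypt_cex2a_modexpo_crt now size the request buffer by device type; a hypothetical shared helper, not introduced by the patch, would be:

    static inline size_t cex_msg_size(struct zcrypt_device *zdev)
    {
        return zdev->user_space_type == ZCRYPT_CEX2A ?
               CEX2A_MAX_MESSAGE_SIZE : CEX3A_MAX_MESSAGE_SIZE;
    }
    /* ... at both call sites: */
    ap_msg.message = kmalloc(cex_msg_size(zdev), GFP_KERNEL);
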
@@ -397,6 +441,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
397 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; 441 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
398 zdev->short_crt = 1; 442 zdev->short_crt = 1;
399 zdev->speed_rating = CEX2A_SPEED_RATING; 443 zdev->speed_rating = CEX2A_SPEED_RATING;
444 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
400 break; 445 break;
401 case AP_DEVICE_TYPE_CEX3A: 446 case AP_DEVICE_TYPE_CEX3A:
402 zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); 447 zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
@@ -404,8 +449,13 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
404 return -ENOMEM; 449 return -ENOMEM;
405 zdev->user_space_type = ZCRYPT_CEX3A; 450 zdev->user_space_type = ZCRYPT_CEX3A;
406 zdev->type_string = "CEX3A"; 451 zdev->type_string = "CEX3A";
407 zdev->min_mod_size = CEX3A_MIN_MOD_SIZE; 452 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
408 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; 453 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
454 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
455 if (ap_4096_commands_available(ap_dev->qid)) {
456 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
457 zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
458 }
409 zdev->short_crt = 1; 459 zdev->short_crt = 1;
410 zdev->speed_rating = CEX3A_SPEED_RATING; 460 zdev->speed_rating = CEX3A_SPEED_RATING;
411 break; 461 break;
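
4096-bit support on CEX3A is evidently optional per queue, so the probe starts from CEX2A limits and upgrades only when the AP bus reports the capability; ap_4096_commands_available() is assumed here to be provided by the AP bus layer in this series. The gate in brief:

    /* Probe-time capability gate, mirroring the hunk above: */
    zdev->max_mod_size       = CEX2A_MAX_MOD_SIZE;      /* 2048 bit default */
    zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
    if (ap_4096_commands_available(ap_dev->qid)) {
        zdev->max_mod_size       = CEX3A_MAX_MOD_SIZE;  /* 4096 bit */
        zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
    }
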
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 8f69d1dacab8..0350665810cf 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -51,8 +51,10 @@ struct type50_hdr {
51 51
52#define TYPE50_MEB1_FMT 0x0001 52#define TYPE50_MEB1_FMT 0x0001
53#define TYPE50_MEB2_FMT 0x0002 53#define TYPE50_MEB2_FMT 0x0002
54#define TYPE50_MEB3_FMT 0x0003
54#define TYPE50_CRB1_FMT 0x0011 55#define TYPE50_CRB1_FMT 0x0011
55#define TYPE50_CRB2_FMT 0x0012 56#define TYPE50_CRB2_FMT 0x0012
57#define TYPE50_CRB3_FMT 0x0013
56 58
57/* Mod-Exp, with a small modulus */ 59/* Mod-Exp, with a small modulus */
58struct type50_meb1_msg { 60struct type50_meb1_msg {
@@ -74,6 +76,16 @@ struct type50_meb2_msg {
74 unsigned char message[256]; 76 unsigned char message[256];
75} __attribute__((packed)); 77} __attribute__((packed));
76 78
79/* Mod-Exp, with a larger modulus */
80struct type50_meb3_msg {
81 struct type50_hdr header;
82 unsigned short keyblock_type; /* 0x0003 */
83 unsigned char reserved[6];
84 unsigned char exponent[512];
85 unsigned char modulus[512];
86 unsigned char message[512];
87} __attribute__((packed));
88
77/* CRT, with a small modulus */ 89/* CRT, with a small modulus */
78struct type50_crb1_msg { 90struct type50_crb1_msg {
79 struct type50_hdr header; 91 struct type50_hdr header;
@@ -100,6 +112,19 @@ struct type50_crb2_msg {
100 unsigned char message[256]; 112 unsigned char message[256];
101} __attribute__((packed)); 113} __attribute__((packed));
102 114
115/* CRT, with a larger modulus */
116struct type50_crb3_msg {
117 struct type50_hdr header;
118 unsigned short keyblock_type; /* 0x0013 */
119 unsigned char reserved[6];
120 unsigned char p[256];
121 unsigned char q[256];
122 unsigned char dp[256];
123 unsigned char dq[256];
124 unsigned char u[256];
125 unsigned char message[512];
126} __attribute__((packed));
127
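
Given the packed layouts above, CEX3A_MAX_MESSAGE_SIZE (defined as sizeof(struct type50_crb3_msg) in zcrypt_cex2a.c) is a fixed sum. Compile-time checks one could add, inside any function; they are not part of the patch:

    /* Both structs are packed, so the sums are exact: */
    BUILD_BUG_ON(sizeof(struct type50_meb3_msg) !=
                 sizeof(struct type50_hdr) + 2 + 6 + 3 * 512);
    BUILD_BUG_ON(sizeof(struct type50_crb3_msg) !=
                 sizeof(struct type50_hdr) + 2 + 6 + 5 * 256 + 512);
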
103/** 128/**
104 * The type 80 response family is associated with a CEX2A card. 129 * The type 80 response family is associated with a CEX2A card.
105 * 130 *
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 09e934b295a0..1afb69c75fea 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -373,6 +373,7 @@ static int zcrypt_pcica_probe(struct ap_device *ap_dev)
373 zdev->min_mod_size = PCICA_MIN_MOD_SIZE; 373 zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
374 zdev->max_mod_size = PCICA_MAX_MOD_SIZE; 374 zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
375 zdev->speed_rating = PCICA_SPEED_RATING; 375 zdev->speed_rating = PCICA_SPEED_RATING;
376 zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE;
376 ap_dev->reply = &zdev->reply; 377 ap_dev->reply = &zdev->reply;
377 ap_dev->private = zdev; 378 ap_dev->private = zdev;
378 rc = zcrypt_device_register(zdev); 379 rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 9dec5c77cff4..aa4c050a5694 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -579,6 +579,7 @@ static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
579 zdev->min_mod_size = PCICC_MIN_MOD_SIZE; 579 zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
580 zdev->max_mod_size = PCICC_MAX_MOD_SIZE; 580 zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
581 zdev->speed_rating = PCICC_SPEED_RATING; 581 zdev->speed_rating = PCICC_SPEED_RATING;
582 zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE;
582 ap_dev->reply = &zdev->reply; 583 ap_dev->reply = &zdev->reply;
583 ap_dev->private = zdev; 584 ap_dev->private = zdev;
584 rc = zcrypt_device_register(zdev); 585 rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 510fab4577d4..4f85eb725f4f 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -45,12 +45,12 @@
45#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ 45#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
46#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */ 46#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
47#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE 47#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
48#define CEX3C_MAX_MOD_SIZE PCIXCC_MAX_MOD_SIZE 48#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
49 49
50#define PCIXCC_MCL2_SPEED_RATING 7870 50#define PCIXCC_MCL2_SPEED_RATING 7870
51#define PCIXCC_MCL3_SPEED_RATING 7870 51#define PCIXCC_MCL3_SPEED_RATING 7870
52#define CEX2C_SPEED_RATING 7000 52#define CEX2C_SPEED_RATING 7000
53#define CEX3C_SPEED_RATING 6500 /* FIXME: needs finetuning */ 53#define CEX3C_SPEED_RATING 6500
54 54
55#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ 55#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
56#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 56#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
@@ -567,6 +567,15 @@ static int convert_response_ica(struct zcrypt_device *zdev,
567 case TYPE88_RSP_CODE: 567 case TYPE88_RSP_CODE:
568 return convert_error(zdev, reply); 568 return convert_error(zdev, reply);
569 case TYPE86_RSP_CODE: 569 case TYPE86_RSP_CODE:
570 if (msg->cprbx.ccp_rtcode &&
571 (msg->cprbx.ccp_rscode == 0x14f) &&
572 (outputdatalength > 256)) {
573 if (zdev->max_exp_bit_length <= 17) {
574 zdev->max_exp_bit_length = 17;
575 return -EAGAIN;
576 } else
577 return -EINVAL;
578 }
570 if (msg->hdr.reply_code) 579 if (msg->hdr.reply_code)
571 return convert_error(zdev, reply); 580 return convert_error(zdev, reply);
572 if (msg->cprbx.cprb_ver_id == 0x02) 581 if (msg->cprbx.cprb_ver_id == 0x02)
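
Reply code 0x14f on output larger than 256 bytes marks the CCA exponent restriction handled above: the hunk drops the device's limit to 17 bits and returns -EAGAIN so the request is reissued under the tightened limit. The ioctl paths in zcrypt_api.c already loop for that, along these lines:

    /* Retry pattern as used by the zcrypt ioctl handlers (shown for
     * context; zcrypt_rsa_crt is the function patched earlier): */
    do {
        rc = zcrypt_rsa_crt(&crt);
    } while (rc == -EAGAIN);
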
@@ -1052,11 +1061,13 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1052 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING; 1061 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
1053 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 1062 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
1054 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1063 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1064 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1055 } else { 1065 } else {
1056 zdev->type_string = "PCIXCC_MCL3"; 1066 zdev->type_string = "PCIXCC_MCL3";
1057 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING; 1067 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
1058 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 1068 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
1059 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1069 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1070 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1060 } 1071 }
1061 break; 1072 break;
1062 case AP_DEVICE_TYPE_CEX2C: 1073 case AP_DEVICE_TYPE_CEX2C:
@@ -1065,6 +1076,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1065 zdev->speed_rating = CEX2C_SPEED_RATING; 1076 zdev->speed_rating = CEX2C_SPEED_RATING;
1066 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 1077 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
1067 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 1078 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
1079 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
1068 break; 1080 break;
1069 case AP_DEVICE_TYPE_CEX3C: 1081 case AP_DEVICE_TYPE_CEX3C:
1070 zdev->user_space_type = ZCRYPT_CEX3C; 1082 zdev->user_space_type = ZCRYPT_CEX3C;
@@ -1072,6 +1084,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
1072 zdev->speed_rating = CEX3C_SPEED_RATING; 1084 zdev->speed_rating = CEX3C_SPEED_RATING;
1073 zdev->min_mod_size = CEX3C_MIN_MOD_SIZE; 1085 zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
1074 zdev->max_mod_size = CEX3C_MAX_MOD_SIZE; 1086 zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
1087 zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
1075 break; 1088 break;
1076 default: 1089 default:
1077 goto out_free; 1090 goto out_free;
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 375aeeaf9ea5..414427d64a8f 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -10,6 +10,7 @@
10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> 10 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
11 */ 11 */
12 12
13#include <linux/kernel_stat.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/bootmem.h> 15#include <linux/bootmem.h>
15#include <linux/err.h> 16#include <linux/err.h>
@@ -25,6 +26,7 @@
25#include <asm/kvm_virtio.h> 26#include <asm/kvm_virtio.h>
26#include <asm/setup.h> 27#include <asm/setup.h>
27#include <asm/s390_ext.h> 28#include <asm/s390_ext.h>
29#include <asm/irq.h>
28 30
29#define VIRTIO_SUBCODE_64 0x0D00 31#define VIRTIO_SUBCODE_64 0x0D00
30 32
@@ -379,6 +381,7 @@ static void kvm_extint_handler(unsigned int ext_int_code,
379 u16 subcode; 381 u16 subcode;
380 u32 param; 382 u32 param;
381 383
384 kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
382 subcode = ext_int_code >> 16; 385 subcode = ext_int_code >> 16;
383 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) 386 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
384 return; 387 return;
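
This hunk starts a pattern repeated in the claw, ctcm and lcs hunks below: per-CPU interrupt accounting, one counter slot per interrupt source, feeding the architecture's interrupt statistics. The whole pattern is a single line at the top of each handler:

    /* Slot index (EXTINT_VRT here, IOINT_CLW/IOINT_CTC/IOINT_LCS in
     * the network drivers below) comes from asm/irq.h: */
    kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
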
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 456b18743397..fa80ba1f0344 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -2,7 +2,8 @@ menu "S/390 network device drivers"
2 depends on NETDEVICES && S390 2 depends on NETDEVICES && S390
3 3
4config LCS 4config LCS
5 tristate "Lan Channel Station Interface" 5 def_tristate m
6 prompt "Lan Channel Station Interface"
6 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI) 7 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
7 help 8 help
8 Select this option if you want to use LCS networking on IBM System z. 9 Select this option if you want to use LCS networking on IBM System z.
@@ -12,7 +13,8 @@ config LCS
12 If you do not know what it is, it's safe to choose Y. 13 If you do not know what it is, it's safe to choose Y.
13 14
14config CTCM 15config CTCM
15 tristate "CTC and MPC SNA device support" 16 def_tristate m
17 prompt "CTC and MPC SNA device support"
16 depends on CCW && NETDEVICES 18 depends on CCW && NETDEVICES
17 help 19 help
18 Select this option if you want to use channel-to-channel 20 Select this option if you want to use channel-to-channel
@@ -26,7 +28,8 @@ config CTCM
26 If you do not need any channel-to-channel connection, choose N. 28 If you do not need any channel-to-channel connection, choose N.
27 29
28config NETIUCV 30config NETIUCV
29 tristate "IUCV network device support (VM only)" 31 def_tristate m
32 prompt "IUCV network device support (VM only)"
30 depends on IUCV && NETDEVICES 33 depends on IUCV && NETDEVICES
31 help 34 help
32 Select this option if you want to use inter-user communication 35 Select this option if you want to use inter-user communication
@@ -37,14 +40,16 @@ config NETIUCV
37 The module name is netiucv. If unsure, choose Y. 40 The module name is netiucv. If unsure, choose Y.
38 41
39config SMSGIUCV 42config SMSGIUCV
40 tristate "IUCV special message support (VM only)" 43 def_tristate m
44 prompt "IUCV special message support (VM only)"
41 depends on IUCV 45 depends on IUCV
42 help 46 help
43 Select this option if you want to be able to receive SMSG messages 47 Select this option if you want to be able to receive SMSG messages
44 from other VM guest systems. 48 from other VM guest systems.
45 49
46config SMSGIUCV_EVENT 50config SMSGIUCV_EVENT
47 tristate "Deliver IUCV special messages as uevents (VM only)" 51 def_tristate m
52 prompt "Deliver IUCV special messages as uevents (VM only)"
48 depends on SMSGIUCV 53 depends on SMSGIUCV
49 help 54 help
50 Select this option to deliver CP special messages (SMSGs) as 55 Select this option to deliver CP special messages (SMSGs) as
@@ -54,7 +59,8 @@ config SMSGIUCV_EVENT
54 To compile as a module, choose M. The module name is "smsgiucv_app". 59 To compile as a module, choose M. The module name is "smsgiucv_app".
55 60
56config CLAW 61config CLAW
57 tristate "CLAW device support" 62 def_tristate m
63 prompt "CLAW device support"
58 depends on CCW && NETDEVICES 64 depends on CCW && NETDEVICES
59 help 65 help
60 This driver supports channel attached CLAW devices. 66 This driver supports channel attached CLAW devices.
@@ -64,7 +70,8 @@ config CLAW
64 To compile into the kernel, choose Y. 70 To compile into the kernel, choose Y.
65 71
66config QETH 72config QETH
67 tristate "Gigabit Ethernet device support" 73 def_tristate y
74 prompt "Gigabit Ethernet device support"
68 depends on CCW && NETDEVICES && IP_MULTICAST && QDIO 75 depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
69 help 76 help
70 This driver supports the IBM System z OSA Express adapters 77 This driver supports the IBM System z OSA Express adapters
@@ -78,25 +85,25 @@ config QETH
78 The module name is qeth. 85 The module name is qeth.
79 86
80config QETH_L2 87config QETH_L2
81 tristate "qeth layer 2 device support" 88 def_tristate y
82 depends on QETH 89 prompt "qeth layer 2 device support"
83 help 90 depends on QETH
84 Select this option to be able to run qeth devices in layer 2 mode. 91 help
85 To compile as a module, choose M. The module name is qeth_l2. 92 Select this option to be able to run qeth devices in layer 2 mode.
86 If unsure, choose y. 93 To compile as a module, choose M. The module name is qeth_l2.
94 If unsure, choose y.
87 95
88config QETH_L3 96config QETH_L3
89 tristate "qeth layer 3 device support" 97 def_tristate y
90 depends on QETH 98 prompt "qeth layer 3 device support"
91 help 99 depends on QETH
92 Select this option to be able to run qeth devices in layer 3 mode. 100 help
93 To compile as a module choose M. The module name is qeth_l3. 101 Select this option to be able to run qeth devices in layer 3 mode.
94 If unsure, choose Y. 102 To compile as a module choose M. The module name is qeth_l3.
103 If unsure, choose Y.
95 104
96config QETH_IPV6 105config QETH_IPV6
97 bool 106 def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
98 depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
99 default y
100 107
101config CCWGROUP 108config CCWGROUP
102 tristate 109 tristate
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 8e4153d740f3..ce3a5c13ce0b 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -63,6 +63,7 @@
63 63
64#define KMSG_COMPONENT "claw" 64#define KMSG_COMPONENT "claw"
65 65
66#include <linux/kernel_stat.h>
66#include <asm/ccwdev.h> 67#include <asm/ccwdev.h>
67#include <asm/ccwgroup.h> 68#include <asm/ccwgroup.h>
68#include <asm/debug.h> 69#include <asm/debug.h>
@@ -640,6 +641,7 @@ claw_irq_handler(struct ccw_device *cdev,
640 struct claw_env *p_env; 641 struct claw_env *p_env;
641 struct chbk *p_ch_r=NULL; 642 struct chbk *p_ch_r=NULL;
642 643
644 kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
643 CLAW_DBF_TEXT(4, trace, "clawirq"); 645 CLAW_DBF_TEXT(4, trace, "clawirq");
644 /* Bypass all 'unsolicited interrupts' */ 646 /* Bypass all 'unsolicited interrupts' */
645 privptr = dev_get_drvdata(&cdev->dev); 647 privptr = dev_get_drvdata(&cdev->dev);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 2c7d2d9be4d0..4c2845985927 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -24,6 +24,7 @@
24#define KMSG_COMPONENT "ctcm" 24#define KMSG_COMPONENT "ctcm"
25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
26 26
27#include <linux/kernel_stat.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/init.h> 29#include <linux/init.h>
29#include <linux/kernel.h> 30#include <linux/kernel.h>
@@ -1204,6 +1205,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1204 int cstat; 1205 int cstat;
1205 int dstat; 1206 int dstat;
1206 1207
1208 kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++;
1207 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, 1209 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1208 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev)); 1210 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
1209 1211
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c9f13b9ea339..09e7a053c844 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -26,6 +26,7 @@
26#define KMSG_COMPONENT "lcs" 26#define KMSG_COMPONENT "lcs"
27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 28
29#include <linux/kernel_stat.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/if.h> 31#include <linux/if.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
@@ -1398,6 +1399,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1398 int rc, index; 1399 int rc, index;
1399 int cstat, dstat; 1400 int cstat, dstat;
1400 1401
1402 kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
1401 if (lcs_check_irb_error(cdev, irb)) 1403 if (lcs_check_irb_error(cdev, irb))
1402 return; 1404 return;
1403 1405
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b7d9dc0adc62..29f848bfc12f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3831,6 +3831,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
3831 init_data.int_parm = (unsigned long) card; 3831 init_data.int_parm = (unsigned long) card;
3832 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; 3832 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3833 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; 3833 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3834 init_data.scan_threshold =
3835 (card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
3834 3836
3835 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 3837 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
3836 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 3838 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
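
scan_threshold is presumably a new qdio_initialize member introduced by this series' qdio changes: it bounds how many consumed buffers may pile up before the output queue is scanned for completions. qeth chooses per card type; zfcp, in the next hunk, sizes it from request headroom:

    /* qeth (hunk above): HiperSockets (IQD) devices get the lower value */
    init_data.scan_threshold =
        (card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;

    /* zfcp (next hunk), assuming QDIO_MAX_BUFFERS_PER_Q == 128: keep two
     * maximum-size requests' worth of headroom before forcing a scan */
    id->scan_threshold =
        QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
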
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 2511f92302dd..8da5ed644c2b 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -290,6 +290,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
290 id->int_parm = (unsigned long) qdio; 290 id->int_parm = (unsigned long) qdio;
291 id->input_sbal_addr_array = (void **) (qdio->res_q); 291 id->input_sbal_addr_array = (void **) (qdio->res_q);
292 id->output_sbal_addr_array = (void **) (qdio->req_q); 292 id->output_sbal_addr_array = (void **) (qdio->req_q);
293 id->scan_threshold =
294 QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
293} 295}
294 296
295/** 297/**