path: root/drivers/s390
author     Arjan van de Ven <arjan@linux.intel.com>	2008-10-17 12:20:26 -0400
committer  Arjan van de Ven <arjan@linux.intel.com>	2008-10-17 12:20:26 -0400
commit     651dab4264e4ba0e563f5ff56f748127246e9065 (patch)
tree       016630974bdcb00fe529b673f96d389e0fd6dc94 /drivers/s390
parent     40b8606253552109815786e5d4b0de98782d31f5 (diff)
parent     2e532d68a2b3e2aa6b19731501222069735c741c (diff)

Merge commit 'linus/master' into merge-linus

Conflicts:
	arch/x86/kvm/i8254.c
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/block/dasd.c	32
-rw-r--r--	drivers/s390/block/dasd_3990_erp.c	2
-rw-r--r--	drivers/s390/block/dasd_devmap.c	28
-rw-r--r--	drivers/s390/block/dasd_eckd.c	132
-rw-r--r--	drivers/s390/block/dasd_eer.c	6
-rw-r--r--	drivers/s390/block/dasd_fba.c	4
-rw-r--r--	drivers/s390/block/dasd_int.h	4
-rw-r--r--	drivers/s390/block/dasd_proc.c	5
-rw-r--r--	drivers/s390/block/dcssblk.c	520
-rw-r--r--	drivers/s390/block/xpram.c	37
-rw-r--r--	drivers/s390/char/con3215.c	53
-rw-r--r--	drivers/s390/char/con3270.c	27
-rw-r--r--	drivers/s390/char/fs3270.c	17
-rw-r--r--	drivers/s390/char/raw3270.c	14
-rw-r--r--	drivers/s390/char/sclp_con.c	24
-rw-r--r--	drivers/s390/char/sclp_vt220.c	26
-rw-r--r--	drivers/s390/char/tape_3590.c	132
-rw-r--r--	drivers/s390/char/tape_block.c	2
-rw-r--r--	drivers/s390/char/tape_class.c	6
-rw-r--r--	drivers/s390/char/tape_core.c	21
-rw-r--r--	drivers/s390/char/tape_proc.c	2
-rw-r--r--	drivers/s390/char/tape_std.c	13
-rw-r--r--	drivers/s390/char/vmlogrdr.c	11
-rw-r--r--	drivers/s390/char/vmur.c	11
-rw-r--r--	drivers/s390/cio/blacklist.c	11
-rw-r--r--	drivers/s390/cio/ccwgroup.c	7
-rw-r--r--	drivers/s390/cio/chp.c	10
-rw-r--r--	drivers/s390/cio/chsc_sch.c	2
-rw-r--r--	drivers/s390/cio/cio.c	61
-rw-r--r--	drivers/s390/cio/cio.h	4
-rw-r--r--	drivers/s390/cio/css.c	36
-rw-r--r--	drivers/s390/cio/device.c	118
-rw-r--r--	drivers/s390/cio/device.h	1
-rw-r--r--	drivers/s390/cio/device_fsm.c	13
-rw-r--r--	drivers/s390/cio/device_ops.c	2
-rw-r--r--	drivers/s390/cio/io_sch.h	22
-rw-r--r--	drivers/s390/cio/ioasm.h	49
-rw-r--r--	drivers/s390/cio/qdio.h	11
-rw-r--r--	drivers/s390/cio/qdio_debug.c	2
-rw-r--r--	drivers/s390/cio/qdio_main.c	32
-rw-r--r--	drivers/s390/cio/qdio_setup.c	55
-rw-r--r--	drivers/s390/crypto/ap_bus.c	4
-rw-r--r--	drivers/s390/kvm/kvm_virtio.c	15
-rw-r--r--	drivers/s390/net/claw.c	36
-rw-r--r--	drivers/s390/net/claw.h	2
-rw-r--r--	drivers/s390/net/ctcm_main.c	22
-rw-r--r--	drivers/s390/net/ctcm_main.h	2
-rw-r--r--	drivers/s390/net/ctcm_mpc.c	2
-rw-r--r--	drivers/s390/net/lcs.c	34
-rw-r--r--	drivers/s390/net/netiucv.c	2
-rw-r--r--	drivers/s390/net/qeth_core.h	10
-rw-r--r--	drivers/s390/net/qeth_core_main.c	14
-rw-r--r--	drivers/s390/net/qeth_l2_main.c	14
-rw-r--r--	drivers/s390/net/qeth_l3_main.c	8
-rw-r--r--	drivers/s390/s390_rdev.c	2
-rw-r--r--	drivers/s390/scsi/zfcp_aux.c	149
-rw-r--r--	drivers/s390/scsi/zfcp_ccw.c	51
-rw-r--r--	drivers/s390/scsi/zfcp_dbf.c	75
-rw-r--r--	drivers/s390/scsi/zfcp_dbf.h	1
-rw-r--r--	drivers/s390/scsi/zfcp_def.h	183
-rw-r--r--	drivers/s390/scsi/zfcp_erp.c	231
-rw-r--r--	drivers/s390/scsi/zfcp_ext.h	27
-rw-r--r--	drivers/s390/scsi/zfcp_fc.c	252
-rw-r--r--	drivers/s390/scsi/zfcp_fsf.c	616
-rw-r--r--	drivers/s390/scsi/zfcp_fsf.h	75
-rw-r--r--	drivers/s390/scsi/zfcp_qdio.c	67
-rw-r--r--	drivers/s390/scsi/zfcp_scsi.c	28
-rw-r--r--	drivers/s390/scsi/zfcp_sysfs.c	62
68 files changed, 1975 insertions, 1574 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index acb78017e7d0..0a225ccda026 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -215,7 +215,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
 		return rc;
 	}
 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
-	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
+	device->debug_area = debug_register(dev_name(&device->cdev->dev), 1, 1,
 					    8 * sizeof(long));
 	debug_register_view(device->debug_area, &debug_sprintf_view);
 	debug_set_level(device->debug_area, DBF_WARNING);
@@ -933,7 +933,7 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
 		MESSAGE(KERN_DEBUG,
 			"invalid status in handle_killed_request: "
 			"bus_id %s, status %02x",
-			cdev->dev.bus_id, cqr->status);
+			dev_name(&cdev->dev), cqr->status);
 		return;
 	}
 
@@ -942,7 +942,7 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
 	    device != dasd_device_from_cdev_locked(cdev) ||
 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
 		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
-			cdev->dev.bus_id);
+			dev_name(&cdev->dev));
 		return;
 	}
 
@@ -982,11 +982,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 			break;
 		case -ETIMEDOUT:
 			printk(KERN_WARNING"%s(%s): request timed out\n",
-			       __func__, cdev->dev.bus_id);
+			       __func__, dev_name(&cdev->dev));
 			break;
 		default:
 			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
-			       __func__, cdev->dev.bus_id, PTR_ERR(irb));
+			       __func__, dev_name(&cdev->dev), PTR_ERR(irb));
 		}
 		dasd_handle_killed_request(cdev, intparm);
 		return;
@@ -995,7 +995,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	now = get_clock();
 
 	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
-		  cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
+		  dev_name(&cdev->dev), ((irb->scsw.cmd.cstat << 8) |
 		  irb->scsw.cmd.dstat), (unsigned int) intparm);
 
 	/* check for unsolicited interrupts */
@@ -1019,7 +1019,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	if (!device ||
 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
 		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
-			cdev->dev.bus_id);
+			dev_name(&cdev->dev));
 		return;
 	}
 
@@ -1037,7 +1037,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	if (cqr->status != DASD_CQR_IN_IO) {
 		MESSAGE(KERN_DEBUG,
 			"invalid status: bus_id %s, status %02x",
-			cdev->dev.bus_id, cqr->status);
+			dev_name(&cdev->dev), cqr->status);
 		return;
 	}
 	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
@@ -2134,14 +2134,14 @@ int dasd_generic_probe(struct ccw_device *cdev,
 	if (ret) {
 		printk(KERN_WARNING
 		       "dasd_generic_probe: could not set ccw-device options "
-		       "for %s\n", cdev->dev.bus_id);
+		       "for %s\n", dev_name(&cdev->dev));
 		return ret;
 	}
 	ret = dasd_add_sysfs_files(cdev);
 	if (ret) {
 		printk(KERN_WARNING
 		       "dasd_generic_probe: could not add sysfs entries "
-		       "for %s\n", cdev->dev.bus_id);
+		       "for %s\n", dev_name(&cdev->dev));
 		return ret;
 	}
 	cdev->handler = &dasd_int_handler;
@@ -2152,13 +2152,13 @@ int dasd_generic_probe(struct ccw_device *cdev,
 	 * initial probe.
 	 */
 	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
-	    (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
+	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
 		ret = ccw_device_set_online(cdev);
 	if (ret)
 		printk(KERN_WARNING
 		       "dasd_generic_probe: could not initially "
 		       "online ccw-device %s; return code: %d\n",
-		       cdev->dev.bus_id, ret);
+		       dev_name(&cdev->dev), ret);
 	return 0;
 }
 
@@ -2224,7 +2224,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
 			printk (KERN_WARNING
 				"dasd_generic couldn't online device %s "
 				"- discipline DIAG not available\n",
-				cdev->dev.bus_id);
+				dev_name(&cdev->dev));
 			dasd_delete_device(device);
 			return -ENODEV;
 		}
@@ -2248,7 +2248,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
 		printk (KERN_WARNING
 			"dasd_generic couldn't online device %s "
 			"with discipline %s rc=%i\n",
-			cdev->dev.bus_id, discipline->name, rc);
+			dev_name(&cdev->dev), discipline->name, rc);
 		module_put(discipline->owner);
 		module_put(base_discipline->owner);
 		dasd_delete_device(device);
@@ -2259,7 +2259,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
 	if (device->state <= DASD_STATE_KNOWN) {
 		printk (KERN_WARNING
 			"dasd_generic discipline not found for %s\n",
-			cdev->dev.bus_id);
+			dev_name(&cdev->dev));
 		rc = -ENODEV;
 		dasd_set_target_state(device, DASD_STATE_NEW);
 		if (device->block)
@@ -2267,7 +2267,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
 		dasd_delete_device(device);
 	} else
 		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);
-			 cdev->dev.bus_id);
+			 dev_name(&cdev->dev));
 
 	/* FIXME: we have to wait for the root device but we don't want
 	 * to wait for each single device but for all at once. */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 5c6e6f331cb0..b8f9c00633f3 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1397,7 +1397,7 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
 		DEV_MESSAGE(KERN_ERR, cqr->startdev,
 			    "ERP on alias device for request %p,"
 			    " recover on base device %s", cqr,
-			    cqr->block->base->cdev->dev.bus_id);
+			    dev_name(&cqr->block->base->cdev->dev));
 	}
 	dasd_eckd_reset_ccw_to_base_io(cqr);
 	erp->startdev = cqr->block->base;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index cd3335c1c307..921443b01d16 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -515,9 +515,9 @@ dasd_devmap_from_cdev(struct ccw_device *cdev)
 {
 	struct dasd_devmap *devmap;
 
-	devmap = dasd_find_busid(cdev->dev.bus_id);
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
 	if (IS_ERR(devmap))
-		devmap = dasd_add_busid(cdev->dev.bus_id,
+		devmap = dasd_add_busid(dev_name(&cdev->dev),
 					DASD_FEATURE_DEFAULT);
 	return devmap;
 }
@@ -584,7 +584,7 @@ dasd_delete_device(struct dasd_device *device)
 	unsigned long flags;
 
 	/* First remove device pointer from devmap. */
-	devmap = dasd_find_busid(device->cdev->dev.bus_id);
+	devmap = dasd_find_busid(dev_name(&device->cdev->dev));
 	BUG_ON(IS_ERR(devmap));
 	spin_lock(&dasd_devmap_lock);
 	if (devmap->device != device) {
@@ -674,7 +674,7 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct dasd_devmap *devmap;
 	int ro_flag;
 
-	devmap = dasd_find_busid(dev->bus_id);
+	devmap = dasd_find_busid(dev_name(dev));
 	if (!IS_ERR(devmap))
 		ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0;
 	else
@@ -723,7 +723,7 @@ dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct dasd_devmap *devmap;
 	int erplog;
 
-	devmap = dasd_find_busid(dev->bus_id);
+	devmap = dasd_find_busid(dev_name(dev));
 	if (!IS_ERR(devmap))
 		erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
 	else
@@ -770,7 +770,7 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct dasd_devmap *devmap;
 	int use_diag;
 
-	devmap = dasd_find_busid(dev->bus_id);
+	devmap = dasd_find_busid(dev_name(dev));
 	if (!IS_ERR(devmap))
 		use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
 	else
@@ -876,7 +876,7 @@ dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct dasd_devmap *devmap;
 	int alias;
 
-	devmap = dasd_find_busid(dev->bus_id);
+	devmap = dasd_find_busid(dev_name(dev));
 	spin_lock(&dasd_devmap_lock);
 	if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
 		spin_unlock(&dasd_devmap_lock);
@@ -899,7 +899,7 @@ dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct dasd_devmap *devmap;
 	char *vendor;
 
-	devmap = dasd_find_busid(dev->bus_id);
+	devmap = dasd_find_busid(dev_name(dev));
 	spin_lock(&dasd_devmap_lock);
 	if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
 		vendor = devmap->uid.vendor;
@@ -924,7 +924,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
 	char ua_string[3];
 	struct dasd_uid *uid;
 
-	devmap = dasd_find_busid(dev->bus_id);
+	devmap = dasd_find_busid(dev_name(dev));
 	spin_lock(&dasd_devmap_lock);
 	if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
 		spin_unlock(&dasd_devmap_lock);
@@ -972,7 +972,7 @@ dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct dasd_devmap *devmap;
 	int eer_flag;
 
-	devmap = dasd_find_busid(dev->bus_id);
+	devmap = dasd_find_busid(dev_name(dev));
 	if (!IS_ERR(devmap) && devmap->device)
 		eer_flag = dasd_eer_enabled(devmap->device);
 	else
@@ -1034,7 +1034,7 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
 {
 	struct dasd_devmap *devmap;
 
-	devmap = dasd_find_busid(cdev->dev.bus_id);
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 	spin_lock(&dasd_devmap_lock);
@@ -1057,7 +1057,7 @@ dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
 {
 	struct dasd_devmap *devmap;
 
-	devmap = dasd_find_busid(cdev->dev.bus_id);
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
@@ -1077,7 +1077,7 @@ dasd_get_feature(struct ccw_device *cdev, int feature)
 {
 	struct dasd_devmap *devmap;
 
-	devmap = dasd_find_busid(cdev->dev.bus_id);
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
@@ -1093,7 +1093,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
 {
 	struct dasd_devmap *devmap;
 
-	devmap = dasd_find_busid(cdev->dev.bus_id);
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 773b3fe275b2..49f9d221e23d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -6,6 +6,8 @@
  * Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
+ * Author.........: Nigel Hislop <hislop_nigel@emc.com>
  *
  */
 
@@ -84,7 +86,7 @@ dasd_eckd_probe (struct ccw_device *cdev)
 	if (ret) {
 		printk(KERN_WARNING
 		       "dasd_eckd_probe: could not set ccw-device options "
-		       "for %s\n", cdev->dev.bus_id);
+		       "for %s\n", dev_name(&cdev->dev));
 		return ret;
 	}
 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
@@ -1501,12 +1503,27 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
 		return;
 	}
 
-	/* just report other unsolicited interrupts */
-	DEV_MESSAGE(KERN_DEBUG, device, "%s",
-		    "unsolicited interrupt received");
-	device->discipline->dump_sense(device, NULL, irb);
-	dasd_schedule_device_bh(device);
+	if ((irb->scsw.cmd.cc == 1) &&
+	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+	    (irb->scsw.cmd.actl & SCSW_ACTL_START_PEND) &&
+	    (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND)) {
+		/* fake irb do nothing, they are handled elsewhere */
+		dasd_schedule_device_bh(device);
+		return;
+	}
+
+	if (!(irb->esw.esw0.erw.cons)) {
+		/* just report other unsolicited interrupts */
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "unsolicited interrupt received");
+	} else {
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "unsolicited interrupt received "
+			    "(sense available)");
+		device->discipline->dump_sense(device, NULL, irb);
+	}
 
+	dasd_schedule_device_bh(device);
 	return;
 };
 
@@ -2068,6 +2085,103 @@ dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
 	return 0;
 }
 
+/*
+ * Issue syscall I/O to EMC Symmetrix array.
+ * CCWs are PSF and RSSD
+ */
+static int dasd_symm_io(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_symmio_parms usrparm;
+	char *psf_data, *rssd_result;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	/* Copy parms from caller */
+	rc = -EFAULT;
+	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+		goto out;
+#ifndef CONFIG_64BIT
+	/* Make sure pointers are sane even on 31 bit. */
+	if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
+		rc = -EINVAL;
+		goto out;
+	}
+#endif
+	/* alloc I/O data area */
+	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
+	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
+	if (!psf_data || !rssd_result) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	/* get syscall header from user space */
+	rc = -EFAULT;
+	if (copy_from_user(psf_data,
+			   (void __user *)(unsigned long) usrparm.psf_data,
+			   usrparm.psf_data_len))
+		goto out_free;
+
+	/* sanity check on syscall header */
+	if (psf_data[0] != 0x17 && psf_data[1] != 0xce) {
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	/* setup CCWs for PSF + RSSD */
+	cqr = dasd_smalloc_request("ECKD", 2 , 0, device);
+	if (IS_ERR(cqr)) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			"Could not allocate initialization request");
+		rc = PTR_ERR(cqr);
+		goto out_free;
+	}
+
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->retries = 3;
+	cqr->expires = 10 * HZ;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	/* Build the ccws */
+	ccw = cqr->cpaddr;
+
+	/* PSF ccw */
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = usrparm.psf_data_len;
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) psf_data;
+
+	ccw++;
+
+	/* RSSD ccw */
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = usrparm.rssd_result_len;
+	ccw->flags = CCW_FLAG_SLI ;
+	ccw->cda = (__u32)(addr_t) rssd_result;
+
+	rc = dasd_sleep_on(cqr);
+	if (rc)
+		goto out_sfree;
+
+	rc = -EFAULT;
+	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
+			   rssd_result, usrparm.rssd_result_len))
+		goto out_sfree;
+	rc = 0;
+
+out_sfree:
+	dasd_sfree_request(cqr, cqr->memdev);
+out_free:
+	kfree(rssd_result);
+	kfree(psf_data);
+out:
+	DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc);
+	return rc;
+}
+
 static int
 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
 {
@@ -2086,6 +2200,8 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
 		return dasd_eckd_reserve(device);
 	case BIODASDSLCK:
 		return dasd_eckd_steal_lock(device);
+	case BIODASDSYMMIO:
+		return dasd_symm_io(device, argp);
 	default:
 		return -ENOIOCTLCMD;
 	}
@@ -2145,13 +2261,13 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
 	/* dump the sense data */
 	len = sprintf(page, KERN_ERR PRINTK_HEADER
 		      " I/O status report for device %s:\n",
-		      device->cdev->dev.bus_id);
+		      dev_name(&device->cdev->dev));
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
 		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " device %s: Failing CCW: %p\n",
-		       device->cdev->dev.bus_id,
+		       dev_name(&device->cdev->dev),
 		       (void *) (addr_t) irb->scsw.cmd.cpa);
 	if (irb->esw.esw0.erw.cons) {
 		for (sl = 0; sl < 4; sl++) {
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index bf512ac75b9e..892e2878d61b 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -309,7 +309,8 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
 	do_gettimeofday(&tv);
 	header.tv_sec = tv.tv_sec;
 	header.tv_usec = tv.tv_usec;
-	strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
+	strncpy(header.busid, dev_name(&device->cdev->dev),
+		DASD_EER_BUSID_SIZE);
 
 	spin_lock_irqsave(&bufferlock, flags);
 	list_for_each_entry(eerb, &bufferlist, list) {
@@ -349,7 +350,8 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
 	do_gettimeofday(&tv);
 	header.tv_sec = tv.tv_sec;
 	header.tv_usec = tv.tv_usec;
-	strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
+	strncpy(header.busid, dev_name(&device->cdev->dev),
+		DASD_EER_BUSID_SIZE);
 
 	spin_lock_irqsave(&bufferlock, flags);
 	list_for_each_entry(eerb, &bufferlist, list) {
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index aa0c533423a5..93d9b6452a94 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -451,13 +451,13 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
 	}
 	len = sprintf(page, KERN_ERR PRINTK_HEADER
 		      " I/O status report for device %s:\n",
-		      device->cdev->dev.bus_id);
+		      dev_name(&device->cdev->dev));
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
 		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " device %s: Failing CCW: %p\n",
-		       device->cdev->dev.bus_id,
+		       dev_name(&device->cdev->dev),
 		       (void *) (addr_t) irb->scsw.cmd.cpa);
 	if (irb->esw.esw0.erw.cons) {
 		for (sl = 0; sl < 4; sl++) {
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 31ecaa4a40e4..489d5fe488fb 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -126,7 +126,7 @@ do { \
 #define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
 do { \
 	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
-	       d_device->cdev->dev.bus_id, d_args); \
+	       dev_name(&d_device->cdev->dev), d_args); \
 	DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
 } while(0)
 
@@ -140,7 +140,7 @@ do { \
 #define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
 do { \
 	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
-	       d_device->cdev->dev.bus_id, d_args); \
+	       dev_name(&d_device->cdev->dev), d_args); \
 } while(0)
 
 #define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 03c0e40a92ff..9088de84b45d 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -67,7 +67,7 @@ dasd_devices_show(struct seq_file *m, void *v)
 		return 0;
 	}
 	/* Print device number. */
-	seq_printf(m, "%s", device->cdev->dev.bus_id);
+	seq_printf(m, "%s", dev_name(&device->cdev->dev));
 	/* Print discipline string. */
 	if (device != NULL && device->discipline != NULL)
 		seq_printf(m, "(%s)", device->discipline->name);
@@ -76,7 +76,8 @@ dasd_devices_show(struct seq_file *m, void *v)
 	/* Print kdev. */
 	if (block->gdp)
 		seq_printf(m, " at (%3d:%6d)",
-			   block->gdp->major, block->gdp->first_minor);
+			   MAJOR(disk_devt(block->gdp)),
+			   MINOR(disk_devt(block->gdp)));
 	else
 		seq_printf(m, " at (???:??????)");
 	/* Print device name. */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 711b3004b3e6..a7ff167d5b81 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,7 +31,6 @@
 #define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
 #define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
 
-
 static int dcssblk_open(struct inode *inode, struct file *filp);
 static int dcssblk_release(struct inode *inode, struct file *filp);
 static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
@@ -48,6 +47,30 @@ static struct block_device_operations dcssblk_devops = {
 	.direct_access = dcssblk_direct_access,
 };
 
+struct dcssblk_dev_info {
+	struct list_head lh;
+	struct device dev;
+	char segment_name[BUS_ID_SIZE];
+	atomic_t use_count;
+	struct gendisk *gd;
+	unsigned long start;
+	unsigned long end;
+	int segment_type;
+	unsigned char save_pending;
+	unsigned char is_shared;
+	struct request_queue *dcssblk_queue;
+	int num_of_segments;
+	struct list_head seg_list;
+};
+
+struct segment_info {
+	struct list_head lh;
+	char segment_name[BUS_ID_SIZE];
+	unsigned long start;
+	unsigned long end;
+	int segment_type;
+};
+
 static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
 				  size_t count);
 static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
@@ -58,30 +81,20 @@ static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *at
 static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf,
 				  size_t count);
 static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf);
+static ssize_t dcssblk_seglist_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf);
 
 static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
 static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
-static DEVICE_ATTR(save, S_IWUSR | S_IRUGO, dcssblk_save_show,
+static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
 		   dcssblk_save_store);
-static DEVICE_ATTR(shared, S_IWUSR | S_IRUGO, dcssblk_shared_show,
+static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
 		   dcssblk_shared_store);
+static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
 
 static struct device *dcssblk_root_dev;
 
-struct dcssblk_dev_info {
-	struct list_head lh;
-	struct device dev;
-	char segment_name[BUS_ID_SIZE];
-	atomic_t use_count;
-	struct gendisk *gd;
-	unsigned long start;
-	unsigned long end;
-	int segment_type;
-	unsigned char save_pending;
-	unsigned char is_shared;
-	struct request_queue *dcssblk_queue;
-};
-
 static LIST_HEAD(dcssblk_devices);
 static struct rw_semaphore dcssblk_devices_sem;
 
@@ -91,8 +104,15 @@ static struct rw_semaphore dcssblk_devices_sem;
 static void
 dcssblk_release_segment(struct device *dev)
 {
-	PRINT_DEBUG("segment release fn called for %s\n", dev->bus_id);
-	kfree(container_of(dev, struct dcssblk_dev_info, dev));
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry, *temp;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
+		list_del(&entry->lh);
+		kfree(entry);
+	}
+	kfree(dev_info);
 	module_put(THIS_MODULE);
 }
 
@@ -114,7 +134,7 @@ dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
 		found = 0;
 		// test if minor available
 		list_for_each_entry(entry, &dcssblk_devices, lh)
-			if (minor == entry->gd->first_minor)
+			if (minor == MINOR(disk_devt(entry->gd)))
 				found++;
 		if (!found) break; // got unused minor
 	}
@@ -142,6 +162,169 @@ dcssblk_get_device_by_name(char *name)
 	return NULL;
 }
 
+/*
+ * get the struct segment_info from seg_list
+ * for the given name.
+ * down_read(&dcssblk_devices_sem) must be held.
+ */
+static struct segment_info *
+dcssblk_get_segment_by_name(char *name)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			if (!strcmp(name, entry->segment_name))
+				return entry;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * get the highest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
+{
+	unsigned long highest_addr;
+	struct segment_info *entry;
+
+	highest_addr = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (highest_addr < entry->end)
+			highest_addr = entry->end;
+	}
+	return highest_addr;
+}
+
+/*
+ * get the lowest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
+{
+	int set_first;
+	unsigned long lowest_addr;
+	struct segment_info *entry;
+
+	set_first = 0;
+	lowest_addr = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (set_first == 0) {
+			lowest_addr = entry->start;
+			set_first = 1;
+		} else {
+			if (lowest_addr > entry->start)
+				lowest_addr = entry->start;
+		}
+	}
+	return lowest_addr;
+}
+
+/*
+ * Check continuity of segments.
+ */
+static int
+dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
+{
+	int i, j, rc;
+	struct segment_info *sort_list, *entry, temp;
+
+	if (dev_info->num_of_segments <= 1)
+		return 0;
+
+	sort_list = kzalloc(
+			sizeof(struct segment_info) * dev_info->num_of_segments,
+			GFP_KERNEL);
+	if (sort_list == NULL)
+		return -ENOMEM;
+	i = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		memcpy(&sort_list[i], entry, sizeof(struct segment_info));
+		i++;
+	}
+
+	/* sort segments */
+	for (i = 0; i < dev_info->num_of_segments; i++)
+		for (j = 0; j < dev_info->num_of_segments; j++)
+			if (sort_list[j].start > sort_list[i].start) {
+				memcpy(&temp, &sort_list[i],
+				       sizeof(struct segment_info));
+				memcpy(&sort_list[i], &sort_list[j],
+				       sizeof(struct segment_info));
+				memcpy(&sort_list[j], &temp,
+				       sizeof(struct segment_info));
+			}
+
+	/* check continuity */
+	for (i = 0; i < dev_info->num_of_segments - 1; i++) {
+		if ((sort_list[i].end + 1) != sort_list[i+1].start) {
+			PRINT_ERR("Segment %s is not contiguous with "
+				  "segment %s\n",
+				  sort_list[i].segment_name,
+				  sort_list[i+1].segment_name);
+			rc = -EINVAL;
+			goto out;
+		}
+		/* EN and EW are allowed in a block device */
+		if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
+			if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
+			    (sort_list[i].segment_type == SEG_TYPE_ER) ||
+			    !(sort_list[i+1].segment_type &
+			      SEGMENT_EXCLUSIVE) ||
+			    (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
+				PRINT_ERR("Segment %s has different type from "
+					  "segment %s\n",
+					  sort_list[i].segment_name,
+					  sort_list[i+1].segment_name);
+				rc = -EINVAL;
+				goto out;
+			}
+		}
+	}
+	rc = 0;
+out:
+	kfree(sort_list);
+	return rc;
+}
+
+/*
+ * Load a segment
+ */
+static int
+dcssblk_load_segment(char *name, struct segment_info **seg_info)
+{
+	int rc;
+
+	/* already loaded? */
+	down_read(&dcssblk_devices_sem);
+	*seg_info = dcssblk_get_segment_by_name(name);
+	up_read(&dcssblk_devices_sem);
+	if (*seg_info != NULL)
+		return -EEXIST;
+
+	/* get a struct segment_info */
+	*seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
+	if (*seg_info == NULL)
+		return -ENOMEM;
+
+	strcpy((*seg_info)->segment_name, name);
+
+	/* load the segment */
+	rc = segment_load(name, SEGMENT_SHARED,
+			  &(*seg_info)->start, &(*seg_info)->end);
+	if (rc < 0) {
+		segment_warning(rc, (*seg_info)->segment_name);
+		kfree(*seg_info);
+	} else {
+		INIT_LIST_HEAD(&(*seg_info)->lh);
+		(*seg_info)->segment_type = rc;
+	}
+	return rc;
+}
+
 static void dcssblk_unregister_callback(struct device *dev)
 {
 	device_unregister(dev);
@@ -165,6 +348,7 @@ static ssize_t
 dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
 {
 	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry, *temp;
 	int rc;
 
 	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
@@ -172,46 +356,46 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 	down_write(&dcssblk_devices_sem);
 	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
 	if (atomic_read(&dev_info->use_count)) {
-		PRINT_ERR("share: segment %s is busy!\n",
-			  dev_info->segment_name);
 		rc = -EBUSY;
 		goto out;
 	}
 	if (inbuf[0] == '1') {
-		// reload segment in shared mode
-		rc = segment_modify_shared(dev_info->segment_name,
-					   SEGMENT_SHARED);
-		if (rc < 0) {
-			BUG_ON(rc == -EINVAL);
-			if (rc != -EAGAIN)
-				goto removeseg;
-		} else {
-			dev_info->is_shared = 1;
-			switch (dev_info->segment_type) {
-			case SEG_TYPE_SR:
-			case SEG_TYPE_ER:
-			case SEG_TYPE_SC:
-				set_disk_ro(dev_info->gd,1);
+		/* reload segments in shared mode */
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			rc = segment_modify_shared(entry->segment_name,
+						   SEGMENT_SHARED);
+			if (rc < 0) {
+				BUG_ON(rc == -EINVAL);
+				if (rc != -EAGAIN)
+					goto removeseg;
 			}
 		}
+		dev_info->is_shared = 1;
+		switch (dev_info->segment_type) {
+		case SEG_TYPE_SR:
+		case SEG_TYPE_ER:
+		case SEG_TYPE_SC:
+			set_disk_ro(dev_info->gd, 1);
+		}
 	} else if (inbuf[0] == '0') {
-		// reload segment in exclusive mode
+		/* reload segments in exclusive mode */
 		if (dev_info->segment_type == SEG_TYPE_SC) {
 			PRINT_ERR("Segment type SC (%s) cannot be loaded in "
 				  "non-shared mode\n", dev_info->segment_name);
 			rc = -EINVAL;
 			goto out;
 		}
-		rc = segment_modify_shared(dev_info->segment_name,
-					   SEGMENT_EXCLUSIVE);
-		if (rc < 0) {
-			BUG_ON(rc == -EINVAL);
-			if (rc != -EAGAIN)
-				goto removeseg;
-		} else {
-			dev_info->is_shared = 0;
-			set_disk_ro(dev_info->gd, 0);
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			rc = segment_modify_shared(entry->segment_name,
+						   SEGMENT_EXCLUSIVE);
+			if (rc < 0) {
+				BUG_ON(rc == -EINVAL);
+				if (rc != -EAGAIN)
+					goto removeseg;
+			}
 		}
+		dev_info->is_shared = 0;
+		set_disk_ro(dev_info->gd, 0);
 	} else {
 		rc = -EINVAL;
 		goto out;
@@ -220,8 +404,14 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 		goto out;
 
 removeseg:
-	PRINT_ERR("Could not reload segment %s, removing it now!\n",
-			dev_info->segment_name);
+	PRINT_ERR("Could not reload segment(s) of the device %s, removing "
+		  "segment(s) now!\n",
+		  dev_info->segment_name);
+	temp = entry;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (entry != temp)
+			segment_unload(entry->segment_name);
+	}
 	list_del(&dev_info->lh);
 
 	del_gendisk(dev_info->gd);
@@ -254,6 +444,7 @@ static ssize_t
 dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
 {
 	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
 
 	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
 		return -EINVAL;
@@ -263,14 +454,16 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 	if (inbuf[0] == '1') {
 		if (atomic_read(&dev_info->use_count) == 0) {
 			// device is idle => we save immediately
-			PRINT_INFO("Saving segment %s\n",
+			PRINT_INFO("Saving segment(s) of the device %s\n",
 				   dev_info->segment_name);
-			segment_save(dev_info->segment_name);
+			list_for_each_entry(entry, &dev_info->seg_list, lh) {
+				segment_save(entry->segment_name);
+			}
 		} else {
 			// device is busy => we save it when it becomes
 			// idle in dcssblk_release
-			PRINT_INFO("Segment %s is currently busy, it will "
-				   "be saved when it becomes idle...\n",
+			PRINT_INFO("Device %s is currently busy, segment(s) "
+				   "will be saved when it becomes idle...\n",
 				   dev_info->segment_name);
 			dev_info->save_pending = 1;
 		}
@@ -279,7 +472,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 			// device is busy & the user wants to undo his save
 			// request
 			dev_info->save_pending = 0;
-			PRINT_INFO("Pending save for segment %s deactivated\n",
+			PRINT_INFO("Pending save for segment(s) of the device "
+				   "%s deactivated\n",
 				   dev_info->segment_name);
 		}
 	} else {
@@ -291,66 +485,123 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 }
 
 /*
+ * device attribute for showing all segments in a device
+ */
+static ssize_t
+dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	int i;
+
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+
+	down_read(&dcssblk_devices_sem);
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	i = 0;
+	buf[0] = '\0';
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		strcpy(&buf[i], entry->segment_name);
+		i += strlen(entry->segment_name);
+		buf[i] = '\n';
+		i++;
+	}
+	up_read(&dcssblk_devices_sem);
+	return i;
+}
+
+/*
  * device attribute for adding devices
  */
 static ssize_t
 dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
-	int rc, i;
+	int rc, i, j, num_of_segments;
 	struct dcssblk_dev_info *dev_info;
+	struct segment_info *seg_info, *temp;
 	char *local_buf;
 	unsigned long seg_byte_size;
 
 	dev_info = NULL;
+	seg_info = NULL;
 	if (dev != dcssblk_root_dev) {
 		rc = -EINVAL;
 		goto out_nobuf;
 	}
+	if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
+		rc = -ENAMETOOLONG;
+		goto out_nobuf;
+	}
+
 	local_buf = kmalloc(count + 1, GFP_KERNEL);
 	if (local_buf == NULL) {
 		rc = -ENOMEM;
 		goto out_nobuf;
 	}
+
 	/*
 	 * parse input
 	 */
+	num_of_segments = 0;
 	for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
-		local_buf[i] = toupper(buf[i]);
+		for (j = i; (buf[j] != ':') &&
+			(buf[j] != '\0') &&
+			(buf[j] != '\n') &&
+			j < count; j++) {
+			local_buf[j-i] = toupper(buf[j]);
+		}
+		local_buf[j-i] = '\0';
+		if (((j - i) == 0) || ((j - i) > 8)) {
+			rc = -ENAMETOOLONG;
+			goto seg_list_del;
+		}
+
+		rc = dcssblk_load_segment(local_buf, &seg_info);
+		if (rc < 0)
+			goto seg_list_del;
+		/*
+		 * get a struct dcssblk_dev_info
+		 */
+		if (num_of_segments == 0) {
+			dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
+					GFP_KERNEL);
+			if (dev_info == NULL) {
+				rc = -ENOMEM;
+				goto out;
+			}
+			strcpy(dev_info->segment_name, local_buf);
+			dev_info->segment_type = seg_info->segment_type;
+			INIT_LIST_HEAD(&dev_info->seg_list);
+		}
+		list_add_tail(&seg_info->lh, &dev_info->seg_list);
+		num_of_segments++;
+		i = j;
+
+		if ((buf[j] == '\0') || (buf[j] == '\n'))
+			break;
 	}
-	local_buf[i] = '\0';
-	if ((i == 0) || (i > 8)) {
+
+	/* no trailing colon at the end of the input */
+	if ((i > 0) && (buf[i-1] == ':')) {
 		rc = -ENAMETOOLONG;
-		goto out;
-	}
-	/*
-	 * already loaded?
-	 */
-	down_read(&dcssblk_devices_sem);
-	dev_info = dcssblk_get_device_by_name(local_buf);
-	up_read(&dcssblk_devices_sem);
-	if (dev_info != NULL) {
-		PRINT_WARN("Segment %s already loaded!\n", local_buf);
-		rc = -EEXIST;
-		goto out;
-	}
-	/*
-	 * get a struct dcssblk_dev_info
-	 */
-	dev_info = kzalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL);
-	if (dev_info == NULL) {
-		rc = -ENOMEM;
-		goto out;
+		goto seg_list_del;
 	}
+	strlcpy(local_buf, buf, i + 1);
+	dev_info->num_of_segments = num_of_segments;
+	rc = dcssblk_is_continuous(dev_info);
+	if (rc < 0)
+		goto seg_list_del;
+
+	dev_info->start = dcssblk_find_lowest_addr(dev_info);
+	dev_info->end = dcssblk_find_highest_addr(dev_info);
 
-	strcpy(dev_info->segment_name, local_buf);
-	strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE);
+	dev_set_name(&dev_info->dev, dev_info->segment_name);
 	dev_info->dev.release = dcssblk_release_segment;
 	INIT_LIST_HEAD(&dev_info->lh);
-
 	dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
 	if (dev_info->gd == NULL) {
 		rc = -ENOMEM;
-		goto free_dev_info;
+		goto seg_list_del;
 	}
 	dev_info->gd->major = dcssblk_major;
 	dev_info->gd->fops = &dcssblk_devops;
@@ -360,59 +611,43 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	dev_info->gd->driverfs_dev = &dev_info->dev;
 	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
 	blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
-	/*
-	 * load the segment
-	 */
-	rc = segment_load(local_buf, SEGMENT_SHARED,
-				&dev_info->start, &dev_info->end);
-	if (rc < 0) {
-		segment_warning(rc, dev_info->segment_name);
-		goto dealloc_gendisk;
-	}
+
 	seg_byte_size = (dev_info->end - dev_info->start + 1);
 	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
-	PRINT_INFO("Loaded segment %s, size = %lu Byte, "
+	PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, "
 		   "capacity = %lu (512 Byte) sectors\n", local_buf,
 		   seg_byte_size, seg_byte_size >> 9);
 
-	dev_info->segment_type = rc;
 	dev_info->save_pending = 0;
 	dev_info->is_shared = 1;
 	dev_info->dev.parent = dcssblk_root_dev;
 
 	/*
-	 * get minor, add to list
+	 *get minor, add to list
 	 */
 	down_write(&dcssblk_devices_sem);
-	if (dcssblk_get_device_by_name(local_buf)) {
-		up_write(&dcssblk_devices_sem);
+	if (dcssblk_get_segment_by_name(local_buf)) {
 		rc = -EEXIST;
-		goto unload_seg;
+		goto release_gd;
 	}
 	rc = dcssblk_assign_free_minor(dev_info);
-	if (rc) {
-		up_write(&dcssblk_devices_sem);
-		PRINT_ERR("No free minor number available! "
-			"Unloading segment...\n");
-		goto unload_seg;
-	}
+	if (rc)
+		goto release_gd;
 	sprintf(dev_info->gd->disk_name, "dcssblk%d",
-		dev_info->gd->first_minor);
+		MINOR(disk_devt(dev_info->gd)));
 	list_add_tail(&dev_info->lh, &dcssblk_devices);
 
 	if (!try_module_get(THIS_MODULE)) {
 		rc = -ENODEV;
-		goto list_del;
+		goto dev_list_del;
 	}
 	/*
 	 * register the device
 	 */
 	rc = device_register(&dev_info->dev);
 	if (rc) {
-		PRINT_ERR("Segment %s could not be registered RC=%d\n",
-			local_buf, rc);
 		module_put(THIS_MODULE);
-		goto list_del;
+		goto dev_list_del;
 	}
 	get_device(&dev_info->dev);
 	rc = device_create_file(&dev_info->dev, &dev_attr_shared);
@@ -421,6 +656,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	rc = device_create_file(&dev_info->dev, &dev_attr_save);
 	if (rc)
 		goto unregister_dev;
+	rc = device_create_file(&dev_info->dev, &dev_attr_seglist);
+	if (rc)
+		goto unregister_dev;
 
 	add_disk(dev_info->gd);
 
@@ -434,7 +672,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 		set_disk_ro(dev_info->gd,0);
 		break;
 	}
-	PRINT_DEBUG("Segment %s loaded successfully\n", local_buf);
 	up_write(&dcssblk_devices_sem);
 	rc = count;
 	goto out;
@@ -445,20 +682,27 @@ unregister_dev:
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 	device_unregister(&dev_info->dev);
-	segment_unload(dev_info->segment_name);
+	list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
+		segment_unload(seg_info->segment_name);
+	}
 	put_device(&dev_info->dev);
 	up_write(&dcssblk_devices_sem);
 	goto out;
-list_del:
+dev_list_del:
 	list_del(&dev_info->lh);
-	up_write(&dcssblk_devices_sem);
-unload_seg:
-	segment_unload(local_buf);
-dealloc_gendisk:
+release_gd:
 	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
-free_dev_info:
+	up_write(&dcssblk_devices_sem);
+seg_list_del:
+	if (dev_info == NULL)
+		goto out;
+	list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
+		list_del(&seg_info->lh);
+		segment_unload(seg_info->segment_name);
+		kfree(seg_info);
+	}
 	kfree(dev_info);
 out:
 	kfree(local_buf);
@@ -473,6 +717,7 @@ static ssize_t
 dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
 	int rc, i;
 	char *local_buf;
 
@@ -499,26 +744,28 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
 	dev_info = dcssblk_get_device_by_name(local_buf);
 	if (dev_info == NULL) {
 		up_write(&dcssblk_devices_sem);
-		PRINT_WARN("Segment %s is not loaded!\n", local_buf);
+		PRINT_WARN("Device %s is not loaded!\n", local_buf);
 		rc = -ENODEV;
 		goto out_buf;
 	}
 	if (atomic_read(&dev_info->use_count) != 0) {
 		up_write(&dcssblk_devices_sem);
-		PRINT_WARN("Segment %s is in use!\n", local_buf);
+		PRINT_WARN("Device %s is in use!\n", local_buf);
 		rc = -EBUSY;
 		goto out_buf;
 	}
-	list_del(&dev_info->lh);
 
+	list_del(&dev_info->lh);
 	del_gendisk(dev_info->gd);
 	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 	device_unregister(&dev_info->dev);
-	segment_unload(dev_info->segment_name);
-	PRINT_DEBUG("Segment %s unloaded successfully\n",
-			dev_info->segment_name);
+
+	/* unload all related segments */
+	list_for_each_entry(entry, &dev_info->seg_list, lh)
+		segment_unload(entry->segment_name);
+
 	put_device(&dev_info->dev);
 	up_write(&dcssblk_devices_sem);
 
@@ -550,6 +797,7 @@ static int
 dcssblk_release(struct inode *inode, struct file *filp)
 {
 	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
 	int rc;
 
 	dev_info = inode->i_bdev->bd_disk->private_data;
@@ -560,9 +808,11 @@ dcssblk_release(struct inode *inode, struct file *filp)
 	down_write(&dcssblk_devices_sem);
 	if (atomic_dec_and_test(&dev_info->use_count)
 	    && (dev_info->save_pending)) {
-		PRINT_INFO("Segment %s became idle and is being saved now\n",
+		PRINT_INFO("Device %s became idle and is being saved now\n",
 			   dev_info->segment_name);
-		segment_save(dev_info->segment_name);
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			segment_save(entry->segment_name);
+		}
 		dev_info->save_pending = 0;
567 } 817 }
568 up_write(&dcssblk_devices_sem); 818 up_write(&dcssblk_devices_sem);
@@ -602,7 +852,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
602 case SEG_TYPE_SC: 852 case SEG_TYPE_SC:
603 /* cannot write to these segments */ 853 /* cannot write to these segments */
604 if (bio_data_dir(bio) == WRITE) { 854 if (bio_data_dir(bio) == WRITE) {
605 PRINT_WARN("rejecting write to ro segment %s\n", dev_info->dev.bus_id); 855 PRINT_WARN("rejecting write to ro device %s\n",
856 dev_name(&dev_info->dev));
606 goto fail; 857 goto fail;
607 } 858 }
608 } 859 }
@@ -657,7 +908,7 @@ static void
657dcssblk_check_params(void) 908dcssblk_check_params(void)
658{ 909{
659 int rc, i, j, k; 910 int rc, i, j, k;
660 char buf[9]; 911 char buf[DCSSBLK_PARM_LEN + 1];
661 struct dcssblk_dev_info *dev_info; 912 struct dcssblk_dev_info *dev_info;
662 913
663 for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0'); 914 for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
@@ -665,15 +916,16 @@ dcssblk_check_params(void)
665 for (j = i; (dcssblk_segments[j] != ',') && 916 for (j = i; (dcssblk_segments[j] != ',') &&
666 (dcssblk_segments[j] != '\0') && 917 (dcssblk_segments[j] != '\0') &&
667 (dcssblk_segments[j] != '(') && 918 (dcssblk_segments[j] != '(') &&
668 (j - i) < 8; j++) 919 (j < DCSSBLK_PARM_LEN); j++)
669 { 920 {
670 buf[j-i] = dcssblk_segments[j]; 921 buf[j-i] = dcssblk_segments[j];
671 } 922 }
672 buf[j-i] = '\0'; 923 buf[j-i] = '\0';
673 rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i); 924 rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
674 if ((rc >= 0) && (dcssblk_segments[j] == '(')) { 925 if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
675 for (k = 0; buf[k] != '\0'; k++) 926 for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
676 buf[k] = toupper(buf[k]); 927 buf[k] = toupper(buf[k]);
928 buf[k] = '\0';
677 if (!strncmp(&dcssblk_segments[j], "(local)", 7)) { 929 if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
678 down_read(&dcssblk_devices_sem); 930 down_read(&dcssblk_devices_sem);
679 dev_info = dcssblk_get_device_by_name(buf); 931 dev_info = dcssblk_get_device_by_name(buf);
@@ -740,10 +992,12 @@ module_exit(dcssblk_exit);
740 992
741module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444); 993module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
742MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, " 994MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
743 "comma-separated list, each name max. 8 chars.\n" 995 "comma-separated list, names in each set separated "
744 "Adding \"(local)\" to segment name equals echoing 0 to " 996 "by commas are separated by colons, each set contains "
745 "/sys/devices/dcssblk/<segment name>/shared after loading " 997 "names of contiguous segments and each name max. 8 chars.\n"
746 "the segment - \n" 998 "Adding \"(local)\" to the end of each set equals echoing 0 "
747 "e.g. segments=\"mydcss1,mydcss2,mydcss3(local)\""); 999 "to /sys/devices/dcssblk/<device name>/shared after loading "
1000 "the contiguous segments - \n"
1001 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
748 1002
749MODULE_LICENSE("GPL"); 1003MODULE_LICENSE("GPL");
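
Editor's note on the dcssblk hunks above: the patch replaces the device's single segment_name with a per-device seg_list, so one block device can now map several contiguous DCSS segments, and every save/unload path walks that list. The snippet below is a reduced, illustrative sketch of that list-handling pattern only, not the driver's actual code; the names demo_seg, demo_dev and demo_unload_all are hypothetical.

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_seg {
		struct list_head lh;
		char name[9];              /* DCSS names are at most 8 characters */
	};

	struct demo_dev {
		struct list_head seg_list; /* all segments backing this block device */
	};

	/* Tear down every segment attached to a device, mirroring the
	 * list_for_each_entry_safe() cleanup path used in dcssblk_add_store(). */
	static void demo_unload_all(struct demo_dev *dev)
	{
		struct demo_seg *seg, *tmp;

		list_for_each_entry_safe(seg, tmp, &dev->seg_list, lh) {
			list_del(&seg->lh);
			/* segment_unload(seg->name);  -- real driver call */
			kfree(seg);
		}
	}
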
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index dd9b986389a2..03916989ed2d 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -56,6 +56,7 @@ typedef struct {
56static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; 56static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
57static unsigned int xpram_sizes[XPRAM_MAX_DEVS]; 57static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
58static struct gendisk *xpram_disks[XPRAM_MAX_DEVS]; 58static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
59static struct request_queue *xpram_queues[XPRAM_MAX_DEVS];
59static unsigned int xpram_pages; 60static unsigned int xpram_pages;
60static int xpram_devs; 61static int xpram_devs;
61 62
@@ -330,18 +331,22 @@ static int __init xpram_setup_sizes(unsigned long pages)
330 return 0; 331 return 0;
331} 332}
332 333
333static struct request_queue *xpram_queue;
334
335static int __init xpram_setup_blkdev(void) 334static int __init xpram_setup_blkdev(void)
336{ 335{
337 unsigned long offset; 336 unsigned long offset;
338 int i, rc = -ENOMEM; 337 int i, rc = -ENOMEM;
339 338
340 for (i = 0; i < xpram_devs; i++) { 339 for (i = 0; i < xpram_devs; i++) {
341 struct gendisk *disk = alloc_disk(1); 340 xpram_disks[i] = alloc_disk(1);
342 if (!disk) 341 if (!xpram_disks[i])
342 goto out;
343 xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
344 if (!xpram_queues[i]) {
345 put_disk(xpram_disks[i]);
343 goto out; 346 goto out;
344 xpram_disks[i] = disk; 347 }
348 blk_queue_make_request(xpram_queues[i], xpram_make_request);
349 blk_queue_hardsect_size(xpram_queues[i], 4096);
345 } 350 }
346 351
347 /* 352 /*
@@ -352,18 +357,6 @@ static int __init xpram_setup_blkdev(void)
352 goto out; 357 goto out;
353 358
354 /* 359 /*
355 * Assign the other needed values: make request function, sizes and
356 * hardsect size. All the minor devices feature the same value.
357 */
358 xpram_queue = blk_alloc_queue(GFP_KERNEL);
359 if (!xpram_queue) {
360 rc = -ENOMEM;
361 goto out_unreg;
362 }
363 blk_queue_make_request(xpram_queue, xpram_make_request);
364 blk_queue_hardsect_size(xpram_queue, 4096);
365
366 /*
367 * Setup device structures. 360 * Setup device structures.
368 */ 361 */
369 offset = 0; 362 offset = 0;
@@ -377,18 +370,18 @@ static int __init xpram_setup_blkdev(void)
377 disk->first_minor = i; 370 disk->first_minor = i;
378 disk->fops = &xpram_devops; 371 disk->fops = &xpram_devops;
379 disk->private_data = &xpram_devices[i]; 372 disk->private_data = &xpram_devices[i];
380 disk->queue = xpram_queue; 373 disk->queue = xpram_queues[i];
381 sprintf(disk->disk_name, "slram%d", i); 374 sprintf(disk->disk_name, "slram%d", i);
382 set_capacity(disk, xpram_sizes[i] << 1); 375 set_capacity(disk, xpram_sizes[i] << 1);
383 add_disk(disk); 376 add_disk(disk);
384 } 377 }
385 378
386 return 0; 379 return 0;
387out_unreg:
388 unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
389out: 380out:
390 while (i--) 381 while (i--) {
382 blk_cleanup_queue(xpram_queues[i]);
391 put_disk(xpram_disks[i]); 383 put_disk(xpram_disks[i]);
384 }
392 return rc; 385 return rc;
393} 386}
394 387
@@ -400,10 +393,10 @@ static void __exit xpram_exit(void)
400 int i; 393 int i;
401 for (i = 0; i < xpram_devs; i++) { 394 for (i = 0; i < xpram_devs; i++) {
402 del_gendisk(xpram_disks[i]); 395 del_gendisk(xpram_disks[i]);
396 blk_cleanup_queue(xpram_queues[i]);
403 put_disk(xpram_disks[i]); 397 put_disk(xpram_disks[i]);
404 } 398 }
405 unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); 399 unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
406 blk_cleanup_queue(xpram_queue);
407} 400}
408 401
409static int __init xpram_init(void) 402static int __init xpram_init(void)
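
Editor's note on the xpram hunks above: the single shared xpram_queue is dropped in favour of one request queue per disk, and the error and exit paths now release queue and gendisk together. A minimal sketch of that allocate-then-roll-back pattern follows, assuming the block API of this kernel generation (alloc_disk, blk_alloc_queue, blk_queue_make_request, blk_cleanup_queue); MAX_DEVS and the demo_* names are placeholders, and the make_request stub only ends the bio.

	#include <linux/blkdev.h>
	#include <linux/genhd.h>
	#include <linux/bio.h>

	#define MAX_DEVS 4

	static struct gendisk *disks[MAX_DEVS];
	static struct request_queue *queues[MAX_DEVS];

	static int demo_make_request(struct request_queue *q, struct bio *bio)
	{
		bio_endio(bio, -EIO);   /* placeholder: a real driver transfers data here */
		return 0;
	}

	static int __init demo_setup(void)
	{
		int i;

		for (i = 0; i < MAX_DEVS; i++) {
			disks[i] = alloc_disk(1);
			if (!disks[i])
				goto out;
			queues[i] = blk_alloc_queue(GFP_KERNEL);
			if (!queues[i]) {
				put_disk(disks[i]);
				goto out;
			}
			blk_queue_make_request(queues[i], demo_make_request);
			disks[i]->queue = queues[i];
		}
		return 0;
	out:
		while (i--) {           /* roll back everything set up so far */
			blk_cleanup_queue(queues[i]);
			put_disk(disks[i]);
		}
		return -ENOMEM;
	}
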
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index d3ec9b55ab35..9ab06e0dad40 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -21,6 +21,7 @@
21#include <linux/console.h> 21#include <linux/console.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/reboot.h>
24 25
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/bootmem.h> 27#include <linux/bootmem.h>
@@ -88,7 +89,6 @@ struct raw3215_info {
88 int count; /* number of bytes in output buffer */ 89 int count; /* number of bytes in output buffer */
89 int written; /* number of bytes in write requests */ 90 int written; /* number of bytes in write requests */
90 struct tty_struct *tty; /* pointer to tty structure if present */ 91 struct tty_struct *tty; /* pointer to tty structure if present */
91 struct tasklet_struct tasklet;
92 struct raw3215_req *queued_read; /* pointer to queued read requests */ 92 struct raw3215_req *queued_read; /* pointer to queued read requests */
93 struct raw3215_req *queued_write;/* pointer to queued write requests */ 93 struct raw3215_req *queued_write;/* pointer to queued write requests */
94 wait_queue_head_t empty_wait; /* wait queue for flushing */ 94 wait_queue_head_t empty_wait; /* wait queue for flushing */
@@ -341,21 +341,14 @@ raw3215_try_io(struct raw3215_info *raw)
341} 341}
342 342
343/* 343/*
344 * The bottom half handler routine for 3215 devices. It tries to start 344 * Try to start the next IO and wake up processes waiting on the tty.
345 * the next IO and wakes up processes waiting on the tty.
346 */ 345 */
347static void 346static void raw3215_next_io(struct raw3215_info *raw)
348raw3215_tasklet(void *data)
349{ 347{
350 struct raw3215_info *raw;
351 struct tty_struct *tty; 348 struct tty_struct *tty;
352 unsigned long flags;
353 349
354 raw = (struct raw3215_info *) data;
355 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
356 raw3215_mk_write_req(raw); 350 raw3215_mk_write_req(raw);
357 raw3215_try_io(raw); 351 raw3215_try_io(raw);
358 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
359 tty = raw->tty; 352 tty = raw->tty;
360 if (tty != NULL && 353 if (tty != NULL &&
361 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) { 354 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
@@ -380,7 +373,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
380 cstat = irb->scsw.cmd.cstat; 373 cstat = irb->scsw.cmd.cstat;
381 dstat = irb->scsw.cmd.dstat; 374 dstat = irb->scsw.cmd.dstat;
382 if (cstat != 0) 375 if (cstat != 0)
383 tasklet_schedule(&raw->tasklet); 376 raw3215_next_io(raw);
384 if (dstat & 0x01) { /* we got a unit exception */ 377 if (dstat & 0x01) { /* we got a unit exception */
385 dstat &= ~0x01; /* we can ignore it */ 378 dstat &= ~0x01; /* we can ignore it */
386 } 379 }
@@ -390,7 +383,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
390 break; 383 break;
391 /* Attention interrupt, someone hit the enter key */ 384 /* Attention interrupt, someone hit the enter key */
392 raw3215_mk_read_req(raw); 385 raw3215_mk_read_req(raw);
393 tasklet_schedule(&raw->tasklet); 386 raw3215_next_io(raw);
394 break; 387 break;
395 case 0x08: 388 case 0x08:
396 case 0x0C: 389 case 0x0C:
@@ -448,7 +441,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
448 raw->queued_read == NULL) { 441 raw->queued_read == NULL) {
449 wake_up_interruptible(&raw->empty_wait); 442 wake_up_interruptible(&raw->empty_wait);
450 } 443 }
451 tasklet_schedule(&raw->tasklet); 444 raw3215_next_io(raw);
452 break; 445 break;
453 default: 446 default:
454 /* Strange interrupt, I'll do my best to clean up */ 447 /* Strange interrupt, I'll do my best to clean up */
@@ -460,7 +453,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
460 raw->flags &= ~RAW3215_WORKING; 453 raw->flags &= ~RAW3215_WORKING;
461 raw3215_free_req(req); 454 raw3215_free_req(req);
462 } 455 }
463 tasklet_schedule(&raw->tasklet); 456 raw3215_next_io(raw);
464 } 457 }
465 return; 458 return;
466} 459}
@@ -674,9 +667,6 @@ raw3215_probe (struct ccw_device *cdev)
674 kfree(raw); 667 kfree(raw);
675 return -ENOMEM; 668 return -ENOMEM;
676 } 669 }
677 tasklet_init(&raw->tasklet,
678 (void (*)(unsigned long)) raw3215_tasklet,
679 (unsigned long) raw);
680 init_waitqueue_head(&raw->empty_wait); 670 init_waitqueue_head(&raw->empty_wait);
681 671
682 cdev->dev.driver_data = raw; 672 cdev->dev.driver_data = raw;
@@ -775,11 +765,11 @@ static struct tty_driver *con3215_device(struct console *c, int *index)
775} 765}
776 766
777/* 767/*
778 * panic() calls console_unblank before the system enters a 768 * panic() calls con3215_flush through a panic_notifier
779 * disabled, endless loop. 769 * before the system enters a disabled, endless loop.
780 */ 770 */
781static void 771static void
782con3215_unblank(void) 772con3215_flush(void)
783{ 773{
784 struct raw3215_info *raw; 774 struct raw3215_info *raw;
785 unsigned long flags; 775 unsigned long flags;
@@ -790,6 +780,23 @@ con3215_unblank(void)
790 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 780 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
791} 781}
792 782
783static int con3215_notify(struct notifier_block *self,
784 unsigned long event, void *data)
785{
786 con3215_flush();
787 return NOTIFY_OK;
788}
789
790static struct notifier_block on_panic_nb = {
791 .notifier_call = con3215_notify,
792 .priority = 0,
793};
794
795static struct notifier_block on_reboot_nb = {
796 .notifier_call = con3215_notify,
797 .priority = 0,
798};
799
793/* 800/*
794 * The console structure for the 3215 console 801 * The console structure for the 3215 console
795 */ 802 */
@@ -797,7 +804,6 @@ static struct console con3215 = {
797 .name = "ttyS", 804 .name = "ttyS",
798 .write = con3215_write, 805 .write = con3215_write,
799 .device = con3215_device, 806 .device = con3215_device,
800 .unblank = con3215_unblank,
801 .flags = CON_PRINTBUFFER, 807 .flags = CON_PRINTBUFFER,
802}; 808};
803 809
@@ -846,9 +852,6 @@ con3215_init(void)
846 cdev->handler = raw3215_irq; 852 cdev->handler = raw3215_irq;
847 853
848 raw->flags |= RAW3215_FIXED; 854 raw->flags |= RAW3215_FIXED;
849 tasklet_init(&raw->tasklet,
850 (void (*)(unsigned long)) raw3215_tasklet,
851 (unsigned long) raw);
852 init_waitqueue_head(&raw->empty_wait); 855 init_waitqueue_head(&raw->empty_wait);
853 856
854 /* Request the console irq */ 857 /* Request the console irq */
@@ -859,6 +862,8 @@ con3215_init(void)
859 raw3215[0] = NULL; 862 raw3215[0] = NULL;
860 return -ENODEV; 863 return -ENODEV;
861 } 864 }
865 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
866 register_reboot_notifier(&on_reboot_nb);
862 register_console(&con3215); 867 register_console(&con3215);
863 return 0; 868 return 0;
864} 869}
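
Editor's note on the con3215 hunks above (the same pattern recurs in con3270 and the SCLP consoles below): the console ->unblank callback and the driver tasklet are removed, and buffered output is instead flushed from panic and reboot notifier chains. A hedged sketch of that registration pattern follows; demo_flush and the notifier block names are illustrative, while atomic_notifier_chain_register and register_reboot_notifier are the interfaces the patch itself uses.

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <linux/reboot.h>

	static void demo_flush(void)
	{
		/* push any buffered console output to the hardware */
	}

	static int demo_notify(struct notifier_block *self,
			       unsigned long event, void *data)
	{
		demo_flush();
		return NOTIFY_OK;
	}

	static struct notifier_block demo_panic_nb = {
		.notifier_call = demo_notify,
	};

	static struct notifier_block demo_reboot_nb = {
		.notifier_call = demo_notify,
	};

	static int __init demo_console_init(void)
	{
		atomic_notifier_chain_register(&panic_notifier_list, &demo_panic_nb);
		register_reboot_notifier(&demo_reboot_nb);
		return 0;
	}
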
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 3c07974886ed..d028d2ee83dd 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -15,6 +15,7 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/reboot.h>
18 19
19#include <asm/ccwdev.h> 20#include <asm/ccwdev.h>
20#include <asm/cio.h> 21#include <asm/cio.h>
@@ -528,11 +529,11 @@ con3270_wait_write(struct con3270 *cp)
528} 529}
529 530
530/* 531/*
531 * panic() calls console_unblank before the system enters a 532 * panic() calls con3270_flush through a panic_notifier
532 * disabled, endless loop. 533 * before the system enters a disabled, endless loop.
533 */ 534 */
534static void 535static void
535con3270_unblank(void) 536con3270_flush(void)
536{ 537{
537 struct con3270 *cp; 538 struct con3270 *cp;
538 unsigned long flags; 539 unsigned long flags;
@@ -554,6 +555,23 @@ con3270_unblank(void)
554 spin_unlock_irqrestore(&cp->view.lock, flags); 555 spin_unlock_irqrestore(&cp->view.lock, flags);
555} 556}
556 557
558static int con3270_notify(struct notifier_block *self,
559 unsigned long event, void *data)
560{
561 con3270_flush();
562 return NOTIFY_OK;
563}
564
565static struct notifier_block on_panic_nb = {
566 .notifier_call = con3270_notify,
567 .priority = 0,
568};
569
570static struct notifier_block on_reboot_nb = {
571 .notifier_call = con3270_notify,
572 .priority = 0,
573};
574
557/* 575/*
558 * The console structure for the 3270 console 576 * The console structure for the 3270 console
559 */ 577 */
@@ -561,7 +579,6 @@ static struct console con3270 = {
561 .name = "tty3270", 579 .name = "tty3270",
562 .write = con3270_write, 580 .write = con3270_write,
563 .device = con3270_device, 581 .device = con3270_device,
564 .unblank = con3270_unblank,
565 .flags = CON_PRINTBUFFER, 582 .flags = CON_PRINTBUFFER,
566}; 583};
567 584
@@ -623,6 +640,8 @@ con3270_init(void)
623 condev->cline->len = 0; 640 condev->cline->len = 0;
624 con3270_create_status(condev); 641 con3270_create_status(condev);
625 condev->input = alloc_string(&condev->freemem, 80); 642 condev->input = alloc_string(&condev->freemem, 80);
643 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
644 register_reboot_notifier(&on_reboot_nb);
626 register_console(&con3270); 645 register_console(&con3270);
627 return 0; 646 return 0;
628} 647}
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index d18e6d2e0b49..40759c33477d 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -418,25 +418,22 @@ fs3270_open(struct inode *inode, struct file *filp)
418{ 418{
419 struct fs3270 *fp; 419 struct fs3270 *fp;
420 struct idal_buffer *ib; 420 struct idal_buffer *ib;
421 int minor, rc; 421 int minor, rc = 0;
422 422
423 if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR) 423 if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
424 return -ENODEV; 424 return -ENODEV;
425 lock_kernel();
426 minor = iminor(filp->f_path.dentry->d_inode); 425 minor = iminor(filp->f_path.dentry->d_inode);
427 /* Check for minor 0 multiplexer. */ 426 /* Check for minor 0 multiplexer. */
428 if (minor == 0) { 427 if (minor == 0) {
429 struct tty_struct *tty; 428 struct tty_struct *tty = get_current_tty();
430 mutex_lock(&tty_mutex);
431 tty = get_current_tty();
432 if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) { 429 if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
433 mutex_unlock(&tty_mutex); 430 tty_kref_put(tty);
434 rc = -ENODEV; 431 return -ENODEV;
435 goto out;
436 } 432 }
437 minor = tty->index + RAW3270_FIRSTMINOR; 433 minor = tty->index + RAW3270_FIRSTMINOR;
438 mutex_unlock(&tty_mutex); 434 tty_kref_put(tty);
439 } 435 }
436 lock_kernel();
440 /* Check if some other program is already using fullscreen mode. */ 437 /* Check if some other program is already using fullscreen mode. */
441 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor); 438 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
442 if (!IS_ERR(fp)) { 439 if (!IS_ERR(fp)) {
@@ -478,7 +475,7 @@ fs3270_open(struct inode *inode, struct file *filp)
478 filp->private_data = fp; 475 filp->private_data = fp;
479out: 476out:
480 unlock_kernel(); 477 unlock_kernel();
481 return 0; 478 return rc;
482} 479}
483 480
484/* 481/*
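
Editor's note on the fs3270_open hunks above: the multiplexer minor is now resolved with the reference-counted get_current_tty()/tty_kref_put() pair instead of holding tty_mutex, and the open path propagates rc rather than returning 0 unconditionally. The sketch below is a simplified illustration of that lookup under those assumptions; it omits the driver-major check and uses hypothetical names.

	#include <linux/tty.h>
	#include <linux/errno.h>

	/* Map minor 0 onto the tty the caller currently owns, dropping the
	 * tty reference on every path that took one. */
	static int demo_resolve_minor(int minor)
	{
		struct tty_struct *tty;

		if (minor != 0)
			return minor;
		tty = get_current_tty();
		if (!tty)
			return -ENODEV;
		minor = tty->index;    /* plus the driver's first minor in the real code */
		tty_kref_put(tty);
		return minor;
	}
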
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index c3dee900a5c8..0b15cf107ec9 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1168,19 +1168,17 @@ static int raw3270_create_attributes(struct raw3270 *rp)
1168 if (rc) 1168 if (rc)
1169 goto out; 1169 goto out;
1170 1170
1171 rp->clttydev = device_create_drvdata(class3270, &rp->cdev->dev, 1171 rp->clttydev = device_create(class3270, &rp->cdev->dev,
1172 MKDEV(IBM_TTY3270_MAJOR, rp->minor), 1172 MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL,
1173 NULL, 1173 "tty%s", dev_name(&rp->cdev->dev));
1174 "tty%s", rp->cdev->dev.bus_id);
1175 if (IS_ERR(rp->clttydev)) { 1174 if (IS_ERR(rp->clttydev)) {
1176 rc = PTR_ERR(rp->clttydev); 1175 rc = PTR_ERR(rp->clttydev);
1177 goto out_ttydev; 1176 goto out_ttydev;
1178 } 1177 }
1179 1178
1180 rp->cltubdev = device_create_drvdata(class3270, &rp->cdev->dev, 1179 rp->cltubdev = device_create(class3270, &rp->cdev->dev,
1181 MKDEV(IBM_FS3270_MAJOR, rp->minor), 1180 MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL,
1182 NULL, 1181 "tub%s", dev_name(&rp->cdev->dev));
1183 "tub%s", rp->cdev->dev.bus_id);
1184 if (!IS_ERR(rp->cltubdev)) 1182 if (!IS_ERR(rp->cltubdev))
1185 goto out; 1183 goto out;
1186 1184
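
Editor's note on the raw3270 hunks above (tape_class below makes the same switch): device_create_drvdata() is replaced by device_create(), whose updated signature takes the drvdata pointer before the format string, and the node name is built from dev_name() now that the bus_id field is going away. A short, hedged example of the call; demo_class, DEMO_MAJOR and the name format are placeholders.

	#include <linux/device.h>

	#define DEMO_MAJOR 227      /* hypothetical major number for illustration */

	static struct device *demo_create_node(struct class *demo_class,
					       struct device *parent, int minor)
	{
		return device_create(demo_class, parent,
				     MKDEV(DEMO_MAJOR, minor), NULL,
				     "tty%s", dev_name(parent));
	}
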
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 7e619c534bf4..9a25c4bd1421 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -16,6 +16,7 @@
16#include <linux/bootmem.h> 16#include <linux/bootmem.h>
17#include <linux/termios.h> 17#include <linux/termios.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/reboot.h>
19 20
20#include "sclp.h" 21#include "sclp.h"
21#include "sclp_rw.h" 22#include "sclp_rw.h"
@@ -172,7 +173,7 @@ sclp_console_device(struct console *c, int *index)
172 * will be flushed to the SCLP. 173 * will be flushed to the SCLP.
173 */ 174 */
174static void 175static void
175sclp_console_unblank(void) 176sclp_console_flush(void)
176{ 177{
177 unsigned long flags; 178 unsigned long flags;
178 179
@@ -188,6 +189,24 @@ sclp_console_unblank(void)
188 spin_unlock_irqrestore(&sclp_con_lock, flags); 189 spin_unlock_irqrestore(&sclp_con_lock, flags);
189} 190}
190 191
192static int
193sclp_console_notify(struct notifier_block *self,
194 unsigned long event, void *data)
195{
196 sclp_console_flush();
197 return NOTIFY_OK;
198}
199
200static struct notifier_block on_panic_nb = {
201 .notifier_call = sclp_console_notify,
202 .priority = 1,
203};
204
205static struct notifier_block on_reboot_nb = {
206 .notifier_call = sclp_console_notify,
207 .priority = 1,
208};
209
191/* 210/*
192 * used to register the SCLP console to the kernel and to 211 * used to register the SCLP console to the kernel and to
193 * give printk necessary information 212 * give printk necessary information
@@ -197,7 +216,6 @@ static struct console sclp_console =
197 .name = sclp_console_name, 216 .name = sclp_console_name,
198 .write = sclp_console_write, 217 .write = sclp_console_write,
199 .device = sclp_console_device, 218 .device = sclp_console_device,
200 .unblank = sclp_console_unblank,
201 .flags = CON_PRINTBUFFER, 219 .flags = CON_PRINTBUFFER,
202 .index = 0 /* ttyS0 */ 220 .index = 0 /* ttyS0 */
203}; 221};
@@ -241,6 +259,8 @@ sclp_console_init(void)
241 sclp_con_width_htab = 8; 259 sclp_con_width_htab = 8;
242 260
243 /* enable printk-access to this driver */ 261 /* enable printk-access to this driver */
262 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
263 register_reboot_notifier(&on_reboot_nb);
244 register_console(&sclp_console); 264 register_console(&sclp_console);
245 return 0; 265 return 0;
246} 266}
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index ad51738c4261..9854f19f5e62 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -24,6 +24,8 @@
24#include <linux/bootmem.h> 24#include <linux/bootmem.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/reboot.h>
28
27#include <asm/uaccess.h> 29#include <asm/uaccess.h>
28#include "sclp.h" 30#include "sclp.h"
29 31
@@ -743,24 +745,30 @@ sclp_vt220_con_device(struct console *c, int *index)
743 return sclp_vt220_driver; 745 return sclp_vt220_driver;
744} 746}
745 747
746/* 748static int
747 * This routine is called from panic when the kernel is going to give up. 749sclp_vt220_notify(struct notifier_block *self,
748 * We have to make sure that all buffers will be flushed to the SCLP. 750 unsigned long event, void *data)
749 * Note that this function may be called from within an interrupt context.
750 */
751static void
752sclp_vt220_con_unblank(void)
753{ 751{
754 __sclp_vt220_flush_buffer(); 752 __sclp_vt220_flush_buffer();
753 return NOTIFY_OK;
755} 754}
756 755
756static struct notifier_block on_panic_nb = {
757 .notifier_call = sclp_vt220_notify,
758 .priority = 1,
759};
760
761static struct notifier_block on_reboot_nb = {
762 .notifier_call = sclp_vt220_notify,
763 .priority = 1,
764};
765
757/* Structure needed to register with printk */ 766/* Structure needed to register with printk */
758static struct console sclp_vt220_console = 767static struct console sclp_vt220_console =
759{ 768{
760 .name = SCLP_VT220_CONSOLE_NAME, 769 .name = SCLP_VT220_CONSOLE_NAME,
761 .write = sclp_vt220_con_write, 770 .write = sclp_vt220_con_write,
762 .device = sclp_vt220_con_device, 771 .device = sclp_vt220_con_device,
763 .unblank = sclp_vt220_con_unblank,
764 .flags = CON_PRINTBUFFER, 772 .flags = CON_PRINTBUFFER,
765 .index = SCLP_VT220_CONSOLE_INDEX 773 .index = SCLP_VT220_CONSOLE_INDEX
766}; 774};
@@ -776,6 +784,8 @@ sclp_vt220_con_init(void)
776 if (rc) 784 if (rc)
777 return rc; 785 return rc;
778 /* Attach linux console */ 786 /* Attach linux console */
787 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
788 register_reboot_notifier(&on_reboot_nb);
779 register_console(&sclp_vt220_console); 789 register_console(&sclp_vt220_console);
780 return 0; 790 return 0;
781} 791}
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 839987618ffd..4005c44a404c 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -910,7 +910,7 @@ tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
910 * should proceed with the new tape... this 910 * should proceed with the new tape... this
911 * should probably be done in user space! 911 * should probably be done in user space!
912 */ 912 */
913 PRINT_WARN("(%s): Swap Tape Device!\n", device->cdev->dev.bus_id); 913 PRINT_WARN("(%s): Swap Tape Device!\n", dev_name(&device->cdev->dev));
914 return tape_3590_erp_basic(device, request, irb, -EIO); 914 return tape_3590_erp_basic(device, request, irb, -EIO);
915} 915}
916 916
@@ -1003,40 +1003,43 @@ tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
1003 /* Exception Message */ 1003 /* Exception Message */
1004 switch (sense->fmt.f70.emc) { 1004 switch (sense->fmt.f70.emc) {
1005 case 0x02: 1005 case 0x02:
1006 PRINT_WARN("(%s): Data degraded\n", device->cdev->dev.bus_id); 1006 PRINT_WARN("(%s): Data degraded\n",
1007 dev_name(&device->cdev->dev));
1007 break; 1008 break;
1008 case 0x03: 1009 case 0x03:
1009 PRINT_WARN("(%s): Data degraded in partion %i\n", 1010 PRINT_WARN("(%s): Data degraded in partion %i\n",
1010 device->cdev->dev.bus_id, sense->fmt.f70.mp); 1011 dev_name(&device->cdev->dev), sense->fmt.f70.mp);
1011 break; 1012 break;
1012 case 0x04: 1013 case 0x04:
1013 PRINT_WARN("(%s): Medium degraded\n", device->cdev->dev.bus_id); 1014 PRINT_WARN("(%s): Medium degraded\n",
1015 dev_name(&device->cdev->dev));
1014 break; 1016 break;
1015 case 0x05: 1017 case 0x05:
1016 PRINT_WARN("(%s): Medium degraded in partition %i\n", 1018 PRINT_WARN("(%s): Medium degraded in partition %i\n",
1017 device->cdev->dev.bus_id, sense->fmt.f70.mp); 1019 dev_name(&device->cdev->dev), sense->fmt.f70.mp);
1018 break; 1020 break;
1019 case 0x06: 1021 case 0x06:
1020 PRINT_WARN("(%s): Block 0 Error\n", device->cdev->dev.bus_id); 1022 PRINT_WARN("(%s): Block 0 Error\n",
1023 dev_name(&device->cdev->dev));
1021 break; 1024 break;
1022 case 0x07: 1025 case 0x07:
1023 PRINT_WARN("(%s): Medium Exception 0x%02x\n", 1026 PRINT_WARN("(%s): Medium Exception 0x%02x\n",
1024 device->cdev->dev.bus_id, sense->fmt.f70.md); 1027 dev_name(&device->cdev->dev), sense->fmt.f70.md);
1025 break; 1028 break;
1026 default: 1029 default:
1027 PRINT_WARN("(%s): MIM ExMsg: 0x%02x\n", 1030 PRINT_WARN("(%s): MIM ExMsg: 0x%02x\n",
1028 device->cdev->dev.bus_id, sense->fmt.f70.emc); 1031 dev_name(&device->cdev->dev), sense->fmt.f70.emc);
1029 break; 1032 break;
1030 } 1033 }
1031 /* Service Message */ 1034 /* Service Message */
1032 switch (sense->fmt.f70.smc) { 1035 switch (sense->fmt.f70.smc) {
1033 case 0x02: 1036 case 0x02:
1034 PRINT_WARN("(%s): Reference Media maintenance procedure %i\n", 1037 PRINT_WARN("(%s): Reference Media maintenance procedure %i\n",
1035 device->cdev->dev.bus_id, sense->fmt.f70.md); 1038 dev_name(&device->cdev->dev), sense->fmt.f70.md);
1036 break; 1039 break;
1037 default: 1040 default:
1038 PRINT_WARN("(%s): MIM ServiceMsg: 0x%02x\n", 1041 PRINT_WARN("(%s): MIM ServiceMsg: 0x%02x\n",
1039 device->cdev->dev.bus_id, sense->fmt.f70.smc); 1042 dev_name(&device->cdev->dev), sense->fmt.f70.smc);
1040 break; 1043 break;
1041 } 1044 }
1042} 1045}
@@ -1054,101 +1057,101 @@ tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
1054 switch (sense->fmt.f71.emc) { 1057 switch (sense->fmt.f71.emc) {
1055 case 0x01: 1058 case 0x01:
1056 PRINT_WARN("(%s): Effect of failure is unknown\n", 1059 PRINT_WARN("(%s): Effect of failure is unknown\n",
1057 device->cdev->dev.bus_id); 1060 dev_name(&device->cdev->dev));
1058 break; 1061 break;
1059 case 0x02: 1062 case 0x02:
1060 PRINT_WARN("(%s): CU Exception - no performance impact\n", 1063 PRINT_WARN("(%s): CU Exception - no performance impact\n",
1061 device->cdev->dev.bus_id); 1064 dev_name(&device->cdev->dev));
1062 break; 1065 break;
1063 case 0x03: 1066 case 0x03:
1064 PRINT_WARN("(%s): CU Exception on channel interface 0x%02x\n", 1067 PRINT_WARN("(%s): CU Exception on channel interface 0x%02x\n",
1065 device->cdev->dev.bus_id, sense->fmt.f71.md[0]); 1068 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
1066 break; 1069 break;
1067 case 0x04: 1070 case 0x04:
1068 PRINT_WARN("(%s): CU Exception on device path 0x%02x\n", 1071 PRINT_WARN("(%s): CU Exception on device path 0x%02x\n",
1069 device->cdev->dev.bus_id, sense->fmt.f71.md[0]); 1072 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
1070 break; 1073 break;
1071 case 0x05: 1074 case 0x05:
1072 PRINT_WARN("(%s): CU Exception on library path 0x%02x\n", 1075 PRINT_WARN("(%s): CU Exception on library path 0x%02x\n",
1073 device->cdev->dev.bus_id, sense->fmt.f71.md[0]); 1076 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
1074 break; 1077 break;
1075 case 0x06: 1078 case 0x06:
1076 PRINT_WARN("(%s): CU Exception on node 0x%02x\n", 1079 PRINT_WARN("(%s): CU Exception on node 0x%02x\n",
1077 device->cdev->dev.bus_id, sense->fmt.f71.md[0]); 1080 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
1078 break; 1081 break;
1079 case 0x07: 1082 case 0x07:
1080 PRINT_WARN("(%s): CU Exception on partition 0x%02x\n", 1083 PRINT_WARN("(%s): CU Exception on partition 0x%02x\n",
1081 device->cdev->dev.bus_id, sense->fmt.f71.md[0]); 1084 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
1082 break; 1085 break;
1083 default: 1086 default:
1084 PRINT_WARN("(%s): SIM ExMsg: 0x%02x\n", 1087 PRINT_WARN("(%s): SIM ExMsg: 0x%02x\n",
1085 device->cdev->dev.bus_id, sense->fmt.f71.emc); 1088 dev_name(&device->cdev->dev), sense->fmt.f71.emc);
1086 } 1089 }
1087 /* Service Message */ 1090 /* Service Message */
1088 switch (sense->fmt.f71.smc) { 1091 switch (sense->fmt.f71.smc) {
1089 case 0x01: 1092 case 0x01:
1090 PRINT_WARN("(%s): Repair impact is unknown\n", 1093 PRINT_WARN("(%s): Repair impact is unknown\n",
1091 device->cdev->dev.bus_id); 1094 dev_name(&device->cdev->dev));
1092 break; 1095 break;
1093 case 0x02: 1096 case 0x02:
1094 PRINT_WARN("(%s): Repair will not impact cu performance\n", 1097 PRINT_WARN("(%s): Repair will not impact cu performance\n",
1095 device->cdev->dev.bus_id); 1098 dev_name(&device->cdev->dev));
1096 break; 1099 break;
1097 case 0x03: 1100 case 0x03:
1098 if (sense->fmt.f71.mdf == 0) 1101 if (sense->fmt.f71.mdf == 0)
1099 PRINT_WARN("(%s): Repair will disable node " 1102 PRINT_WARN("(%s): Repair will disable node "
1100 "0x%x on CU\n", 1103 "0x%x on CU\n",
1101 device->cdev->dev.bus_id, 1104 dev_name(&device->cdev->dev),
1102 sense->fmt.f71.md[1]); 1105 sense->fmt.f71.md[1]);
1103 else 1106 else
1104 PRINT_WARN("(%s): Repair will disable nodes " 1107 PRINT_WARN("(%s): Repair will disable nodes "
1105 "(0x%x-0x%x) on CU\n", 1108 "(0x%x-0x%x) on CU\n",
1106 device->cdev->dev.bus_id, 1109 dev_name(&device->cdev->dev),
1107 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1110 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1108 break; 1111 break;
1109 case 0x04: 1112 case 0x04:
1110 if (sense->fmt.f71.mdf == 0) 1113 if (sense->fmt.f71.mdf == 0)
1111 PRINT_WARN("(%s): Repair will disable cannel path " 1114 PRINT_WARN("(%s): Repair will disable cannel path "
1112 "0x%x on CU\n", 1115 "0x%x on CU\n",
1113 device->cdev->dev.bus_id, 1116 dev_name(&device->cdev->dev),
1114 sense->fmt.f71.md[1]); 1117 sense->fmt.f71.md[1]);
1115 else 1118 else
1116 PRINT_WARN("(%s): Repair will disable cannel paths " 1119 PRINT_WARN("(%s): Repair will disable cannel paths "
1117 "(0x%x-0x%x) on CU\n", 1120 "(0x%x-0x%x) on CU\n",
1118 device->cdev->dev.bus_id, 1121 dev_name(&device->cdev->dev),
1119 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1122 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1120 break; 1123 break;
1121 case 0x05: 1124 case 0x05:
1122 if (sense->fmt.f71.mdf == 0) 1125 if (sense->fmt.f71.mdf == 0)
1123 PRINT_WARN("(%s): Repair will disable device path " 1126 PRINT_WARN("(%s): Repair will disable device path "
1124 "0x%x on CU\n", 1127 "0x%x on CU\n",
1125 device->cdev->dev.bus_id, 1128 dev_name(&device->cdev->dev),
1126 sense->fmt.f71.md[1]); 1129 sense->fmt.f71.md[1]);
1127 else 1130 else
1128 PRINT_WARN("(%s): Repair will disable device paths " 1131 PRINT_WARN("(%s): Repair will disable device paths "
1129 "(0x%x-0x%x) on CU\n", 1132 "(0x%x-0x%x) on CU\n",
1130 device->cdev->dev.bus_id, 1133 dev_name(&device->cdev->dev),
1131 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1134 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1132 break; 1135 break;
1133 case 0x06: 1136 case 0x06:
1134 if (sense->fmt.f71.mdf == 0) 1137 if (sense->fmt.f71.mdf == 0)
1135 PRINT_WARN("(%s): Repair will disable library path " 1138 PRINT_WARN("(%s): Repair will disable library path "
1136 "0x%x on CU\n", 1139 "0x%x on CU\n",
1137 device->cdev->dev.bus_id, 1140 dev_name(&device->cdev->dev),
1138 sense->fmt.f71.md[1]); 1141 sense->fmt.f71.md[1]);
1139 else 1142 else
1140 PRINT_WARN("(%s): Repair will disable library paths " 1143 PRINT_WARN("(%s): Repair will disable library paths "
1141 "(0x%x-0x%x) on CU\n", 1144 "(0x%x-0x%x) on CU\n",
1142 device->cdev->dev.bus_id, 1145 dev_name(&device->cdev->dev),
1143 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1146 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1144 break; 1147 break;
1145 case 0x07: 1148 case 0x07:
1146 PRINT_WARN("(%s): Repair will disable access to CU\n", 1149 PRINT_WARN("(%s): Repair will disable access to CU\n",
1147 device->cdev->dev.bus_id); 1150 dev_name(&device->cdev->dev));
1148 break; 1151 break;
1149 default: 1152 default:
1150 PRINT_WARN("(%s): SIM ServiceMsg: 0x%02x\n", 1153 PRINT_WARN("(%s): SIM ServiceMsg: 0x%02x\n",
1151 device->cdev->dev.bus_id, sense->fmt.f71.smc); 1154 dev_name(&device->cdev->dev), sense->fmt.f71.smc);
1152 } 1155 }
1153} 1156}
1154 1157
@@ -1165,104 +1168,104 @@ tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
1165 switch (sense->fmt.f71.emc) { 1168 switch (sense->fmt.f71.emc) {
1166 case 0x01: 1169 case 0x01:
1167 PRINT_WARN("(%s): Effect of failure is unknown\n", 1170 PRINT_WARN("(%s): Effect of failure is unknown\n",
1168 device->cdev->dev.bus_id); 1171 dev_name(&device->cdev->dev));
1169 break; 1172 break;
1170 case 0x02: 1173 case 0x02:
1171 PRINT_WARN("(%s): DV Exception - no performance impact\n", 1174 PRINT_WARN("(%s): DV Exception - no performance impact\n",
1172 device->cdev->dev.bus_id); 1175 dev_name(&device->cdev->dev));
1173 break; 1176 break;
1174 case 0x03: 1177 case 0x03:
1175 PRINT_WARN("(%s): DV Exception on channel interface 0x%02x\n", 1178 PRINT_WARN("(%s): DV Exception on channel interface 0x%02x\n",
1176 device->cdev->dev.bus_id, sense->fmt.f71.md[0]); 1179 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
1177 break; 1180 break;
1178 case 0x04: 1181 case 0x04:
1179 PRINT_WARN("(%s): DV Exception on loader 0x%02x\n", 1182 PRINT_WARN("(%s): DV Exception on loader 0x%02x\n",
1180 device->cdev->dev.bus_id, sense->fmt.f71.md[0]); 1183 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
1181 break; 1184 break;
1182 case 0x05: 1185 case 0x05:
1183 PRINT_WARN("(%s): DV Exception on message display 0x%02x\n", 1186 PRINT_WARN("(%s): DV Exception on message display 0x%02x\n",
1184 device->cdev->dev.bus_id, sense->fmt.f71.md[0]); 1187 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
1185 break; 1188 break;
1186 case 0x06: 1189 case 0x06:
1187 PRINT_WARN("(%s): DV Exception in tape path\n", 1190 PRINT_WARN("(%s): DV Exception in tape path\n",
1188 device->cdev->dev.bus_id); 1191 dev_name(&device->cdev->dev));
1189 break; 1192 break;
1190 case 0x07: 1193 case 0x07:
1191 PRINT_WARN("(%s): DV Exception in drive\n", 1194 PRINT_WARN("(%s): DV Exception in drive\n",
1192 device->cdev->dev.bus_id); 1195 dev_name(&device->cdev->dev));
1193 break; 1196 break;
1194 default: 1197 default:
1195 PRINT_WARN("(%s): DSIM ExMsg: 0x%02x\n", 1198 PRINT_WARN("(%s): DSIM ExMsg: 0x%02x\n",
1196 device->cdev->dev.bus_id, sense->fmt.f71.emc); 1199 dev_name(&device->cdev->dev), sense->fmt.f71.emc);
1197 } 1200 }
1198 /* Service Message */ 1201 /* Service Message */
1199 switch (sense->fmt.f71.smc) { 1202 switch (sense->fmt.f71.smc) {
1200 case 0x01: 1203 case 0x01:
1201 PRINT_WARN("(%s): Repair impact is unknown\n", 1204 PRINT_WARN("(%s): Repair impact is unknown\n",
1202 device->cdev->dev.bus_id); 1205 dev_name(&device->cdev->dev));
1203 break; 1206 break;
1204 case 0x02: 1207 case 0x02:
1205 PRINT_WARN("(%s): Repair will not impact device performance\n", 1208 PRINT_WARN("(%s): Repair will not impact device performance\n",
1206 device->cdev->dev.bus_id); 1209 dev_name(&device->cdev->dev));
1207 break; 1210 break;
1208 case 0x03: 1211 case 0x03:
1209 if (sense->fmt.f71.mdf == 0) 1212 if (sense->fmt.f71.mdf == 0)
1210 PRINT_WARN("(%s): Repair will disable channel path " 1213 PRINT_WARN("(%s): Repair will disable channel path "
1211 "0x%x on DV\n", 1214 "0x%x on DV\n",
1212 device->cdev->dev.bus_id, 1215 dev_name(&device->cdev->dev),
1213 sense->fmt.f71.md[1]); 1216 sense->fmt.f71.md[1]);
1214 else 1217 else
1215 PRINT_WARN("(%s): Repair will disable channel path " 1218 PRINT_WARN("(%s): Repair will disable channel path "
1216 "(0x%x-0x%x) on DV\n", 1219 "(0x%x-0x%x) on DV\n",
1217 device->cdev->dev.bus_id, 1220 dev_name(&device->cdev->dev),
1218 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1221 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1219 break; 1222 break;
1220 case 0x04: 1223 case 0x04:
1221 if (sense->fmt.f71.mdf == 0) 1224 if (sense->fmt.f71.mdf == 0)
1222 PRINT_WARN("(%s): Repair will disable interface 0x%x " 1225 PRINT_WARN("(%s): Repair will disable interface 0x%x "
1223 "on DV\n", 1226 "on DV\n",
1224 device->cdev->dev.bus_id, 1227 dev_name(&device->cdev->dev),
1225 sense->fmt.f71.md[1]); 1228 sense->fmt.f71.md[1]);
1226 else 1229 else
1227 PRINT_WARN("(%s): Repair will disable interfaces " 1230 PRINT_WARN("(%s): Repair will disable interfaces "
1228 "(0x%x-0x%x) on DV\n", 1231 "(0x%x-0x%x) on DV\n",
1229 device->cdev->dev.bus_id, 1232 dev_name(&device->cdev->dev),
1230 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1233 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1231 break; 1234 break;
1232 case 0x05: 1235 case 0x05:
1233 if (sense->fmt.f71.mdf == 0) 1236 if (sense->fmt.f71.mdf == 0)
1234 PRINT_WARN("(%s): Repair will disable loader 0x%x " 1237 PRINT_WARN("(%s): Repair will disable loader 0x%x "
1235 "on DV\n", 1238 "on DV\n",
1236 device->cdev->dev.bus_id, 1239 dev_name(&device->cdev->dev),
1237 sense->fmt.f71.md[1]); 1240 sense->fmt.f71.md[1]);
1238 else 1241 else
1239 PRINT_WARN("(%s): Repair will disable loader " 1242 PRINT_WARN("(%s): Repair will disable loader "
1240 "(0x%x-0x%x) on DV\n", 1243 "(0x%x-0x%x) on DV\n",
1241 device->cdev->dev.bus_id, 1244 dev_name(&device->cdev->dev),
1242 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1245 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1243 break; 1246 break;
1244 case 0x07: 1247 case 0x07:
1245 PRINT_WARN("(%s): Repair will disable access to DV\n", 1248 PRINT_WARN("(%s): Repair will disable access to DV\n",
1246 device->cdev->dev.bus_id); 1249 dev_name(&device->cdev->dev));
1247 break; 1250 break;
1248 case 0x08: 1251 case 0x08:
1249 if (sense->fmt.f71.mdf == 0) 1252 if (sense->fmt.f71.mdf == 0)
1250 PRINT_WARN("(%s): Repair will disable message " 1253 PRINT_WARN("(%s): Repair will disable message "
1251 "display 0x%x on DV\n", 1254 "display 0x%x on DV\n",
1252 device->cdev->dev.bus_id, 1255 dev_name(&device->cdev->dev),
1253 sense->fmt.f71.md[1]); 1256 sense->fmt.f71.md[1]);
1254 else 1257 else
1255 PRINT_WARN("(%s): Repair will disable message " 1258 PRINT_WARN("(%s): Repair will disable message "
1256 "displays (0x%x-0x%x) on DV\n", 1259 "displays (0x%x-0x%x) on DV\n",
1257 device->cdev->dev.bus_id, 1260 dev_name(&device->cdev->dev),
1258 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1261 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1259 break; 1262 break;
1260 case 0x09: 1263 case 0x09:
1261 PRINT_WARN("(%s): Clean DV\n", device->cdev->dev.bus_id); 1264 PRINT_WARN("(%s): Clean DV\n", dev_name(&device->cdev->dev));
1262 break; 1265 break;
1263 default: 1266 default:
1264 PRINT_WARN("(%s): DSIM ServiceMsg: 0x%02x\n", 1267 PRINT_WARN("(%s): DSIM ServiceMsg: 0x%02x\n",
1265 device->cdev->dev.bus_id, sense->fmt.f71.smc); 1268 dev_name(&device->cdev->dev), sense->fmt.f71.smc);
1266 } 1269 }
1267} 1270}
1268 1271
@@ -1279,18 +1282,18 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1279 return; 1282 return;
1280 if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) { 1283 if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
1281 if (tape_3590_msg[sense->mc] != NULL) 1284 if (tape_3590_msg[sense->mc] != NULL)
1282 PRINT_WARN("(%s): %s\n", device->cdev->dev.bus_id, 1285 PRINT_WARN("(%s): %s\n", dev_name(&device->cdev->dev),
1283 tape_3590_msg[sense->mc]); 1286 tape_3590_msg[sense->mc]);
1284 else { 1287 else {
1285 PRINT_WARN("(%s): Message Code 0x%x\n", 1288 PRINT_WARN("(%s): Message Code 0x%x\n",
1286 device->cdev->dev.bus_id, sense->mc); 1289 dev_name(&device->cdev->dev), sense->mc);
1287 } 1290 }
1288 return; 1291 return;
1289 } 1292 }
1290 if (sense->mc == 0xf0) { 1293 if (sense->mc == 0xf0) {
1291 /* Standard Media Information Message */ 1294 /* Standard Media Information Message */
1292 PRINT_WARN("(%s): MIM SEV=%i, MC=%02x, ES=%x/%x, " 1295 PRINT_WARN("(%s): MIM SEV=%i, MC=%02x, ES=%x/%x, "
1293 "RC=%02x-%04x-%02x\n", device->cdev->dev.bus_id, 1296 "RC=%02x-%04x-%02x\n", dev_name(&device->cdev->dev),
1294 sense->fmt.f70.sev, sense->mc, 1297 sense->fmt.f70.sev, sense->mc,
1295 sense->fmt.f70.emc, sense->fmt.f70.smc, 1298 sense->fmt.f70.emc, sense->fmt.f70.smc,
1296 sense->fmt.f70.refcode, sense->fmt.f70.mid, 1299 sense->fmt.f70.refcode, sense->fmt.f70.mid,
@@ -1302,7 +1305,7 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1302 /* Standard I/O Subsystem Service Information Message */ 1305 /* Standard I/O Subsystem Service Information Message */
1303 PRINT_WARN("(%s): IOSIM SEV=%i, DEVTYPE=3590/%02x, " 1306 PRINT_WARN("(%s): IOSIM SEV=%i, DEVTYPE=3590/%02x, "
1304 "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", 1307 "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
1305 device->cdev->dev.bus_id, sense->fmt.f71.sev, 1308 dev_name(&device->cdev->dev), sense->fmt.f71.sev,
1306 device->cdev->id.dev_model, 1309 device->cdev->id.dev_model,
1307 sense->mc, sense->fmt.f71.emc, 1310 sense->mc, sense->fmt.f71.emc,
1308 sense->fmt.f71.smc, sense->fmt.f71.refcode1, 1311 sense->fmt.f71.smc, sense->fmt.f71.refcode1,
@@ -1314,7 +1317,7 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1314 /* Standard Device Service Information Message */ 1317 /* Standard Device Service Information Message */
1315 PRINT_WARN("(%s): DEVSIM SEV=%i, DEVTYPE=3590/%02x, " 1318 PRINT_WARN("(%s): DEVSIM SEV=%i, DEVTYPE=3590/%02x, "
1316 "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", 1319 "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
1317 device->cdev->dev.bus_id, sense->fmt.f71.sev, 1320 dev_name(&device->cdev->dev), sense->fmt.f71.sev,
1318 device->cdev->id.dev_model, 1321 device->cdev->id.dev_model,
1319 sense->mc, sense->fmt.f71.emc, 1322 sense->mc, sense->fmt.f71.emc,
1320 sense->fmt.f71.smc, sense->fmt.f71.refcode1, 1323 sense->fmt.f71.smc, sense->fmt.f71.refcode1,
@@ -1327,7 +1330,7 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1327 return; 1330 return;
1328 } 1331 }
1329 PRINT_WARN("(%s): Device Message(%x)\n", 1332 PRINT_WARN("(%s): Device Message(%x)\n",
1330 device->cdev->dev.bus_id, sense->mc); 1333 dev_name(&device->cdev->dev), sense->mc);
1331} 1334}
1332 1335
1333static int tape_3590_crypt_error(struct tape_device *device, 1336static int tape_3590_crypt_error(struct tape_device *device,
@@ -1336,10 +1339,11 @@ static int tape_3590_crypt_error(struct tape_device *device,
1336 u8 cu_rc, ekm_rc1; 1339 u8 cu_rc, ekm_rc1;
1337 u16 ekm_rc2; 1340 u16 ekm_rc2;
1338 u32 drv_rc; 1341 u32 drv_rc;
1339 char *bus_id, *sense; 1342 const char *bus_id;
1343 char *sense;
1340 1344
1341 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; 1345 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1342 bus_id = device->cdev->dev.bus_id; 1346 bus_id = dev_name(&device->cdev->dev);
1343 cu_rc = sense[0]; 1347 cu_rc = sense[0];
1344 drv_rc = *((u32*) &sense[5]) & 0xffffff; 1348 drv_rc = *((u32*) &sense[5]) & 0xffffff;
1345 ekm_rc1 = sense[9]; 1349 ekm_rc1 = sense[9];
@@ -1440,7 +1444,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1440 * "device intervention" is not very meaningfull 1444 * "device intervention" is not very meaningfull
1441 */ 1445 */
1442 PRINT_WARN("(%s): Tape operation when medium not loaded\n", 1446 PRINT_WARN("(%s): Tape operation when medium not loaded\n",
1443 device->cdev->dev.bus_id); 1447 dev_name(&device->cdev->dev));
1444 tape_med_state_set(device, MS_UNLOADED); 1448 tape_med_state_set(device, MS_UNLOADED);
1445 tape_3590_schedule_work(device, TO_CRYPT_OFF); 1449 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1446 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1450 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
@@ -1487,18 +1491,18 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1487 1491
1488 case 0x6020: 1492 case 0x6020:
1489 PRINT_WARN("(%s): Cartridge of wrong type ?\n", 1493 PRINT_WARN("(%s): Cartridge of wrong type ?\n",
1490 device->cdev->dev.bus_id); 1494 dev_name(&device->cdev->dev));
1491 return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); 1495 return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
1492 1496
1493 case 0x8011: 1497 case 0x8011:
1494 PRINT_WARN("(%s): Another host has reserved the tape device\n", 1498 PRINT_WARN("(%s): Another host has reserved the tape device\n",
1495 device->cdev->dev.bus_id); 1499 dev_name(&device->cdev->dev));
1496 return tape_3590_erp_basic(device, request, irb, -EPERM); 1500 return tape_3590_erp_basic(device, request, irb, -EPERM);
1497 case 0x8013: 1501 case 0x8013:
1498 PRINT_WARN("(%s): Another host has privileged access to the " 1502 PRINT_WARN("(%s): Another host has privileged access to the "
1499 "tape device\n", device->cdev->dev.bus_id); 1503 "tape device\n", dev_name(&device->cdev->dev));
1500 PRINT_WARN("(%s): To solve the problem unload the current " 1504 PRINT_WARN("(%s): To solve the problem unload the current "
1501 "cartridge!\n", device->cdev->dev.bus_id); 1505 "cartridge!\n", dev_name(&device->cdev->dev));
1502 return tape_3590_erp_basic(device, request, irb, -EPERM); 1506 return tape_3590_erp_basic(device, request, irb, -EPERM);
1503 default: 1507 default:
1504 return tape_3590_erp_basic(device, request, irb, -EIO); 1508 return tape_3590_erp_basic(device, request, irb, -EIO);
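
Editor's note on the tape_3590 hunks above (and the tape_block, tape_class, tape_core, tape_proc and tape_std hunks that follow): the bulk of the change is a mechanical conversion from the removed dev->bus_id field to dev_name(dev), which returns the same bus id as a const char *. A one-function sketch of the idiom, with demo names as placeholders:

	#include <linux/device.h>
	#include <linux/kernel.h>

	static void demo_report(struct device *dev)
	{
		const char *bus_id = dev_name(dev);   /* was dev->bus_id */

		printk(KERN_WARNING "(%s): device message\n", bus_id);
	}
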
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 95da72bc17e8..a25b8bf54f41 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -278,7 +278,7 @@ tapeblock_cleanup_device(struct tape_device *device)
278 278
279 if (!device->blk_data.disk) { 279 if (!device->blk_data.disk) {
280 PRINT_ERR("(%s): No gendisk to clean up!\n", 280 PRINT_ERR("(%s): No gendisk to clean up!\n",
281 device->cdev->dev.bus_id); 281 dev_name(&device->cdev->dev));
282 goto cleanup_queue; 282 goto cleanup_queue;
283 } 283 }
284 284
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 12c2a5aaf31b..ddc914ccea8f 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -69,9 +69,9 @@ struct tape_class_device *register_tape_dev(
69 if (rc) 69 if (rc)
70 goto fail_with_cdev; 70 goto fail_with_cdev;
71 71
72 tcd->class_device = device_create_drvdata(tape_class, device, 72 tcd->class_device = device_create(tape_class, device,
73 tcd->char_device->dev, 73 tcd->char_device->dev, NULL,
74 NULL, "%s", tcd->device_name); 74 "%s", tcd->device_name);
75 rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0; 75 rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
76 if (rc) 76 if (rc)
77 goto fail_with_cdev; 77 goto fail_with_cdev;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 181a5441af16..d7073dbf825c 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -215,12 +215,12 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
215 case MS_UNLOADED: 215 case MS_UNLOADED:
216 device->tape_generic_status |= GMT_DR_OPEN(~0); 216 device->tape_generic_status |= GMT_DR_OPEN(~0);
217 PRINT_INFO("(%s): Tape is unloaded\n", 217 PRINT_INFO("(%s): Tape is unloaded\n",
218 device->cdev->dev.bus_id); 218 dev_name(&device->cdev->dev));
219 break; 219 break;
220 case MS_LOADED: 220 case MS_LOADED:
221 device->tape_generic_status &= ~GMT_DR_OPEN(~0); 221 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
222 PRINT_INFO("(%s): Tape has been mounted\n", 222 PRINT_INFO("(%s): Tape has been mounted\n",
223 device->cdev->dev.bus_id); 223 dev_name(&device->cdev->dev));
224 break; 224 break;
225 default: 225 default:
226 // print nothing 226 // print nothing
@@ -415,7 +415,7 @@ tape_generic_offline(struct tape_device *device)
415 device->cdev_id); 415 device->cdev_id);
416 PRINT_WARN("(%s): Set offline failed " 416 PRINT_WARN("(%s): Set offline failed "
417 "- drive in use.\n", 417 "- drive in use.\n",
418 device->cdev->dev.bus_id); 418 dev_name(&device->cdev->dev));
419 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 419 spin_unlock_irq(get_ccwdev_lock(device->cdev));
420 return -EBUSY; 420 return -EBUSY;
421 } 421 }
@@ -538,7 +538,8 @@ tape_generic_probe(struct ccw_device *cdev)
538 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); 538 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
539 if (ret) { 539 if (ret) {
540 tape_put_device(device); 540 tape_put_device(device);
541 PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id); 541 PRINT_ERR("probe failed for tape device %s\n",
542 dev_name(&cdev->dev));
542 return ret; 543 return ret;
543 } 544 }
544 cdev->dev.driver_data = device; 545 cdev->dev.driver_data = device;
@@ -546,7 +547,7 @@ tape_generic_probe(struct ccw_device *cdev)
546 device->cdev = cdev; 547 device->cdev = cdev;
547 ccw_device_get_id(cdev, &dev_id); 548 ccw_device_get_id(cdev, &dev_id);
548 device->cdev_id = devid_to_int(&dev_id); 549 device->cdev_id = devid_to_int(&dev_id);
549 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); 550 PRINT_INFO("tape device %s found\n", dev_name(&cdev->dev));
550 return ret; 551 return ret;
551} 552}
552 553
@@ -616,7 +617,7 @@ tape_generic_remove(struct ccw_device *cdev)
616 device->cdev_id); 617 device->cdev_id);
617 PRINT_WARN("(%s): Drive in use vanished - " 618 PRINT_WARN("(%s): Drive in use vanished - "
618 "expect trouble!\n", 619 "expect trouble!\n",
619 device->cdev->dev.bus_id); 620 dev_name(&device->cdev->dev));
620 PRINT_WARN("State was %i\n", device->tape_state); 621 PRINT_WARN("State was %i\n", device->tape_state);
621 tape_state_set(device, TS_NOT_OPER); 622 tape_state_set(device, TS_NOT_OPER);
622 __tape_discard_requests(device); 623 __tape_discard_requests(device);
@@ -840,7 +841,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request,
840 PRINT_INFO("-------------------------------------------------\n"); 841 PRINT_INFO("-------------------------------------------------\n");
841 PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", 842 PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
842 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa); 843 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa);
843 PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id); 844 PRINT_INFO("DEVICE: %s\n", dev_name(&device->cdev->dev));
844 if (request != NULL) 845 if (request != NULL)
845 PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); 846 PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
846 847
@@ -1051,7 +1052,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1051 device = (struct tape_device *) cdev->dev.driver_data; 1052 device = (struct tape_device *) cdev->dev.driver_data;
1052 if (device == NULL) { 1053 if (device == NULL) {
1053 PRINT_ERR("could not get device structure for %s " 1054 PRINT_ERR("could not get device structure for %s "
1054 "in interrupt\n", cdev->dev.bus_id); 1055 "in interrupt\n", dev_name(&cdev->dev));
1055 return; 1056 return;
1056 } 1057 }
1057 request = (struct tape_request *) intparm; 1058 request = (struct tape_request *) intparm;
@@ -1064,13 +1065,13 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1064 switch (PTR_ERR(irb)) { 1065 switch (PTR_ERR(irb)) {
1065 case -ETIMEDOUT: 1066 case -ETIMEDOUT:
1066 PRINT_WARN("(%s): Request timed out\n", 1067 PRINT_WARN("(%s): Request timed out\n",
1067 cdev->dev.bus_id); 1068 dev_name(&cdev->dev));
1068 case -EIO: 1069 case -EIO:
1069 __tape_end_request(device, request, -EIO); 1070 __tape_end_request(device, request, -EIO);
1070 break; 1071 break;
1071 default: 1072 default:
1072 PRINT_ERR("(%s): Unexpected i/o error %li\n", 1073 PRINT_ERR("(%s): Unexpected i/o error %li\n",
1073 cdev->dev.bus_id, 1074 dev_name(&cdev->dev),
1074 PTR_ERR(irb)); 1075 PTR_ERR(irb));
1075 } 1076 }
1076 return; 1077 return;
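
The tape_core.c hunks above are part of the tree-wide switch from reading dev->bus_id directly to the dev_name() accessor, which keeps drivers independent of how struct device stores its name. A minimal kernel-style sketch of the new idiom (not taken from this patch; the message text is illustrative):

#include <linux/device.h>
#include <linux/kernel.h>

/*
 * dev_name() replaces direct reads of dev->bus_id, so callers no longer
 * depend on the fixed-size name array inside struct device.
 */
static void report_device(struct device *dev)
{
        printk(KERN_INFO "tape device %s found\n", dev_name(dev));
}
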
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index e7c888c14e71..8a376af926a7 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -52,7 +52,7 @@ static int tape_proc_show(struct seq_file *m, void *v)
52 return 0; 52 return 0;
53 spin_lock_irq(get_ccwdev_lock(device->cdev)); 53 spin_lock_irq(get_ccwdev_lock(device->cdev));
54 seq_printf(m, "%d\t", (int) n); 54 seq_printf(m, "%d\t", (int) n);
55 seq_printf(m, "%-10.10s ", device->cdev->dev.bus_id); 55 seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
56 seq_printf(m, "%04X/", device->cdev->id.cu_type); 56 seq_printf(m, "%04X/", device->cdev->id.cu_type);
57 seq_printf(m, "%02X\t", device->cdev->id.cu_model); 57 seq_printf(m, "%02X\t", device->cdev->id.cu_model);
58 seq_printf(m, "%04X/", device->cdev->id.dev_type); 58 seq_printf(m, "%04X/", device->cdev->id.dev_type);
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index cc8fd781ee22..5bd573d144d6 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -47,7 +47,7 @@ tape_std_assign_timeout(unsigned long data)
47 rc = tape_cancel_io(device, request); 47 rc = tape_cancel_io(device, request);
48 if(rc) 48 if(rc)
49 PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n", 49 PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n",
50 device->cdev->dev.bus_id, rc); 50 dev_name(&device->cdev->dev), rc);
51 51
52} 52}
53 53
@@ -83,7 +83,7 @@ tape_std_assign(struct tape_device *device)
83 83
84 if (rc != 0) { 84 if (rc != 0) {
85 PRINT_WARN("%s: assign failed - device might be busy\n", 85 PRINT_WARN("%s: assign failed - device might be busy\n",
86 device->cdev->dev.bus_id); 86 dev_name(&device->cdev->dev));
87 DBF_EVENT(3, "%08x: assign failed - device might be busy\n", 87 DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
88 device->cdev_id); 88 device->cdev_id);
89 } else { 89 } else {
@@ -106,7 +106,7 @@ tape_std_unassign (struct tape_device *device)
106 DBF_EVENT(3, "(%08x): Can't unassign device\n", 106 DBF_EVENT(3, "(%08x): Can't unassign device\n",
107 device->cdev_id); 107 device->cdev_id);
108 PRINT_WARN("(%s): Can't unassign device - device gone\n", 108 PRINT_WARN("(%s): Can't unassign device - device gone\n",
109 device->cdev->dev.bus_id); 109 dev_name(&device->cdev->dev));
110 return -EIO; 110 return -EIO;
111 } 111 }
112 112
@@ -120,7 +120,8 @@ tape_std_unassign (struct tape_device *device)
120 120
121 if ((rc = tape_do_io(device, request)) != 0) { 121 if ((rc = tape_do_io(device, request)) != 0) {
122 DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id); 122 DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
123 PRINT_WARN("%s: Unassign failed\n", device->cdev->dev.bus_id); 123 PRINT_WARN("%s: Unassign failed\n",
124 dev_name(&device->cdev->dev));
124 } else { 125 } else {
125 DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id); 126 DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
126 } 127 }
@@ -634,10 +635,10 @@ tape_std_mtcompression(struct tape_device *device, int mt_count)
634 DBF_EXCEPTION(6, "xcom parm\n"); 635 DBF_EXCEPTION(6, "xcom parm\n");
635 if (*device->modeset_byte & 0x08) 636 if (*device->modeset_byte & 0x08)
636 PRINT_INFO("(%s) Compression is currently on\n", 637 PRINT_INFO("(%s) Compression is currently on\n",
637 device->cdev->dev.bus_id); 638 dev_name(&device->cdev->dev));
638 else 639 else
639 PRINT_INFO("(%s) Compression is currently off\n", 640 PRINT_INFO("(%s) Compression is currently off\n",
640 device->cdev->dev.bus_id); 641 dev_name(&device->cdev->dev));
641 PRINT_INFO("Use 1 to switch compression on, 0 to " 642 PRINT_INFO("Use 1 to switch compression on, 0 to "
642 "switch it off\n"); 643 "switch it off\n");
643 return -EINVAL; 644 return -EINVAL;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c31faefa2b3b..24762727bc27 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -724,8 +724,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
724 724
725 dev = kzalloc(sizeof(struct device), GFP_KERNEL); 725 dev = kzalloc(sizeof(struct device), GFP_KERNEL);
726 if (dev) { 726 if (dev) {
727 snprintf(dev->bus_id, BUS_ID_SIZE, "%s", 727 dev_set_name(dev, priv->internal_name);
728 priv->internal_name);
729 dev->bus = &iucv_bus; 728 dev->bus = &iucv_bus;
730 dev->parent = iucv_root; 729 dev->parent = iucv_root;
731 dev->driver = &vmlogrdr_driver; 730 dev->driver = &vmlogrdr_driver;
@@ -748,10 +747,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
748 device_unregister(dev); 747 device_unregister(dev);
749 return ret; 748 return ret;
750 } 749 }
751 priv->class_device = device_create_drvdata(vmlogrdr_class, dev, 750 priv->class_device = device_create(vmlogrdr_class, dev,
752 MKDEV(vmlogrdr_major, 751 MKDEV(vmlogrdr_major,
753 priv->minor_num), 752 priv->minor_num),
754 priv, "%s", dev->bus_id); 753 priv, "%s", dev_name(dev));
755 if (IS_ERR(priv->class_device)) { 754 if (IS_ERR(priv->class_device)) {
756 ret = PTR_ERR(priv->class_device); 755 ret = PTR_ERR(priv->class_device);
757 priv->class_device=NULL; 756 priv->class_device=NULL;
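
The vmlogrdr.c hunks pair that change with its write-side counterpart: dev_set_name() instead of snprintf() into the fixed-size bus_id array, and device_create() (which now takes the drvdata pointer directly) instead of device_create_drvdata(). A sketch under those assumptions, with demo_class, demo_major and the name format purely illustrative:

#include <linux/device.h>
#include <linux/kdev_t.h>

static struct class *demo_class;
static int demo_major;

static int demo_name_device(struct device *dev, const char *name)
{
        /* replaces snprintf(dev->bus_id, BUS_ID_SIZE, "%s", name) */
        return dev_set_name(dev, "%s", name);
}

static struct device *demo_add_chardev(struct device *parent, int minor,
                                       void *drvdata)
{
        /* drvdata is now an argument of device_create() itself, which is
         * why the device_create_drvdata() wrapper could be dropped */
        return device_create(demo_class, parent, MKDEV(demo_major, minor),
                             drvdata, "demo%d", minor);
}
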
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index c1f352b84868..9020eba620ee 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -886,19 +886,18 @@ static int ur_set_online(struct ccw_device *cdev)
886 goto fail_free_cdev; 886 goto fail_free_cdev;
887 if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) { 887 if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
888 if (urd->class == DEV_CLASS_UR_I) 888 if (urd->class == DEV_CLASS_UR_I)
889 sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id); 889 sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
890 if (urd->class == DEV_CLASS_UR_O) 890 if (urd->class == DEV_CLASS_UR_O)
891 sprintf(node_id, "vmpun-%s", cdev->dev.bus_id); 891 sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
892 } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) { 892 } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
893 sprintf(node_id, "vmprt-%s", cdev->dev.bus_id); 893 sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
894 } else { 894 } else {
895 rc = -EOPNOTSUPP; 895 rc = -EOPNOTSUPP;
896 goto fail_free_cdev; 896 goto fail_free_cdev;
897 } 897 }
898 898
899 urd->device = device_create_drvdata(vmur_class, NULL, 899 urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
900 urd->char_device->dev, NULL, 900 NULL, "%s", node_id);
901 "%s", node_id);
902 if (IS_ERR(urd->device)) { 901 if (IS_ERR(urd->device)) {
903 rc = PTR_ERR(urd->device); 902 rc = PTR_ERR(urd->device);
904 TRACE("ur_set_online: device_create rc=%d\n", rc); 903 TRACE("ur_set_online: device_create rc=%d\n", rc);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 0bfcbbe375c4..2f547b840ef0 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -24,6 +24,7 @@
24#include "cio.h" 24#include "cio.h"
25#include "cio_debug.h" 25#include "cio_debug.h"
26#include "css.h" 26#include "css.h"
27#include "device.h"
27 28
28/* 29/*
29 * "Blacklisting" of certain devices: 30 * "Blacklisting" of certain devices:
@@ -191,9 +192,9 @@ static int blacklist_parse_parameters(char *str, range_action action,
191 rc = blacklist_range(ra, from_ssid, to_ssid, from, to, 192 rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
192 msgtrigger); 193 msgtrigger);
193 if (rc) 194 if (rc)
194 totalrc = 1; 195 totalrc = -EINVAL;
195 } else 196 } else
196 totalrc = 1; 197 totalrc = -EINVAL;
197 } 198 }
198 199
199 return totalrc; 200 return totalrc;
@@ -240,8 +241,10 @@ static int blacklist_parse_proc_parameters(char *buf)
240 rc = blacklist_parse_parameters(buf, free, 0); 241 rc = blacklist_parse_parameters(buf, free, 0);
241 else if (strcmp("add", parm) == 0) 242 else if (strcmp("add", parm) == 0)
242 rc = blacklist_parse_parameters(buf, add, 0); 243 rc = blacklist_parse_parameters(buf, add, 0);
244 else if (strcmp("purge", parm) == 0)
245 return ccw_purge_blacklisted();
243 else 246 else
244 return 1; 247 return -EINVAL;
245 248
246 css_schedule_reprobe(); 249 css_schedule_reprobe();
247 250
@@ -353,7 +356,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
353 } 356 }
354 ret = blacklist_parse_proc_parameters(buf); 357 ret = blacklist_parse_proc_parameters(buf);
355 if (ret) 358 if (ret)
356 rc = -EINVAL; 359 rc = ret;
357 else 360 else
358 rc = user_len; 361 rc = user_len;
359 362
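
The blacklist.c hunks switch the parser from returning a bare 1 to negative errno values, so cio_ignore_write() can hand the real error back to user space, and add a new "purge" keyword wired to ccw_purge_blacklisted(). A compact sketch of the dispatch, with parse_ranges() and purge_unused() as stand-ins for the real helpers:

#include <linux/errno.h>
#include <linux/string.h>

/* stand-ins for blacklist_parse_parameters() and ccw_purge_blacklisted() */
int parse_ranges(char *s, int add);
int purge_unused(void);

static int handle_keyword(const char *parm, char *rest)
{
        if (strcmp(parm, "free") == 0)
                return parse_ranges(rest, 0);
        if (strcmp(parm, "add") == 0)
                return parse_ranges(rest, 1);
        if (strcmp(parm, "purge") == 0)
                return purge_unused();
        return -EINVAL;         /* handed back to the writer of the proc file */
}
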
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e0ce65fca4e7..3ac2c2019f5e 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -113,7 +113,8 @@ ccwgroup_release (struct device *dev)
113 113
114 for (i = 0; i < gdev->count; i++) { 114 for (i = 0; i < gdev->count; i++) {
115 if (gdev->cdev[i]) { 115 if (gdev->cdev[i]) {
116 dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 116 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
117 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
117 put_device(&gdev->cdev[i]->dev); 118 put_device(&gdev->cdev[i]->dev);
118 } 119 }
119 } 120 }
@@ -268,8 +269,7 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
268 goto error; 269 goto error;
269 } 270 }
270 271
271 snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", 272 dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
272 gdev->cdev[0]->dev.bus_id);
273 273
274 rc = device_add(&gdev->dev); 274 rc = device_add(&gdev->dev);
275 if (rc) 275 if (rc)
@@ -296,6 +296,7 @@ error:
296 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) 296 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
297 dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 297 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
298 put_device(&gdev->cdev[i]->dev); 298 put_device(&gdev->cdev[i]->dev);
299 gdev->cdev[i] = NULL;
299 } 300 }
300 mutex_unlock(&gdev->reg_mutex); 301 mutex_unlock(&gdev->reg_mutex);
301 put_device(&gdev->dev); 302 put_device(&gdev->dev);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index db00b0591733..1246f61a5338 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -393,8 +393,7 @@ int chp_new(struct chp_id chpid)
393 chp->state = 1; 393 chp->state = 1;
394 chp->dev.parent = &channel_subsystems[chpid.cssid]->device; 394 chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
395 chp->dev.release = chp_release; 395 chp->dev.release = chp_release;
396 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid, 396 dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
397 chpid.id);
398 397
399 /* Obtain channel path description and fill it in. */ 398 /* Obtain channel path description and fill it in. */
400 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); 399 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
@@ -423,7 +422,7 @@ int chp_new(struct chp_id chpid)
423 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); 422 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
424 if (ret) { 423 if (ret) {
425 device_unregister(&chp->dev); 424 device_unregister(&chp->dev);
426 goto out_free; 425 goto out;
427 } 426 }
428 mutex_lock(&channel_subsystems[chpid.cssid]->mutex); 427 mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
429 if (channel_subsystems[chpid.cssid]->cm_enabled) { 428 if (channel_subsystems[chpid.cssid]->cm_enabled) {
@@ -432,14 +431,15 @@ int chp_new(struct chp_id chpid)
432 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); 431 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
433 device_unregister(&chp->dev); 432 device_unregister(&chp->dev);
434 mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); 433 mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
435 goto out_free; 434 goto out;
436 } 435 }
437 } 436 }
438 channel_subsystems[chpid.cssid]->chps[chpid.id] = chp; 437 channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
439 mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); 438 mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
440 return ret; 439 goto out;
441out_free: 440out_free:
442 kfree(chp); 441 kfree(chp);
442out:
443 return ret; 443 return ret;
444} 444}
445 445
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 91ca87aa9f97..f49f0e502b8d 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -261,7 +261,7 @@ static int chsc_examine_irb(struct chsc_request *request)
261{ 261{
262 int backed_up; 262 int backed_up;
263 263
264 if (!scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND) 264 if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
265 return -EIO; 265 return -EIO;
266 backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK; 266 backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
267 request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK; 267 request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
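
The chsc_sch.c hunk fixes an operator-precedence bug: '!' binds more tightly than '&', so the old test evaluated (!stctl) & SCSW_STCTL_STATUS_PEND instead of testing the flag. A small self-contained example of the difference (the flag value is illustrative):

#include <assert.h>

#define STATUS_PEND 0x01

int main(void)
{
        unsigned int stctl = 0x04;      /* some bits set, but not STATUS_PEND */

        /* buggy form: (!stctl) & STATUS_PEND is 0 whenever stctl != 0 */
        assert((!stctl & STATUS_PEND) == 0);

        /* fixed form: mask first, then negate */
        assert(!(stctl & STATUS_PEND) == 1);
        return 0;
}
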
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 33bff8fec7d1..3db2c386546f 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -114,6 +114,7 @@ cio_tpi(void)
114 struct tpi_info *tpi_info; 114 struct tpi_info *tpi_info;
115 struct subchannel *sch; 115 struct subchannel *sch;
116 struct irb *irb; 116 struct irb *irb;
117 int irq_context;
117 118
118 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 119 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
119 if (tpi (NULL) != 1) 120 if (tpi (NULL) != 1)
@@ -126,7 +127,9 @@ cio_tpi(void)
126 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 127 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
127 if (!sch) 128 if (!sch)
128 return 1; 129 return 1;
129 local_bh_disable(); 130 irq_context = in_interrupt();
131 if (!irq_context)
132 local_bh_disable();
130 irq_enter (); 133 irq_enter ();
131 spin_lock(sch->lock); 134 spin_lock(sch->lock);
132 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); 135 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
@@ -134,7 +137,8 @@ cio_tpi(void)
134 sch->driver->irq(sch); 137 sch->driver->irq(sch);
135 spin_unlock(sch->lock); 138 spin_unlock(sch->lock);
136 irq_exit (); 139 irq_exit ();
137 _local_bh_enable(); 140 if (!irq_context)
141 _local_bh_enable();
138 return 1; 142 return 1;
139} 143}
140 144
@@ -153,7 +157,7 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
153 CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " 157 CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
154 "subchannel 0.%x.%04x!\n", sch->schid.ssid, 158 "subchannel 0.%x.%04x!\n", sch->schid.ssid,
155 sch->schid.sch_no); 159 sch->schid.sch_no);
156 sprintf(dbf_text, "no%s", sch->dev.bus_id); 160 sprintf(dbf_text, "no%s", dev_name(&sch->dev));
157 CIO_TRACE_EVENT(0, dbf_text); 161 CIO_TRACE_EVENT(0, dbf_text);
158 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 162 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
159 163
@@ -171,9 +175,10 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
171 union orb *orb; 175 union orb *orb;
172 176
173 CIO_TRACE_EVENT(4, "stIO"); 177 CIO_TRACE_EVENT(4, "stIO");
174 CIO_TRACE_EVENT(4, sch->dev.bus_id); 178 CIO_TRACE_EVENT(4, dev_name(&sch->dev));
175 179
176 orb = &to_io_private(sch)->orb; 180 orb = &to_io_private(sch)->orb;
181 memset(orb, 0, sizeof(union orb));
177 /* sch is always under 2G. */ 182 /* sch is always under 2G. */
178 orb->cmd.intparm = (u32)(addr_t)sch; 183 orb->cmd.intparm = (u32)(addr_t)sch;
179 orb->cmd.fmt = 1; 184 orb->cmd.fmt = 1;
@@ -208,8 +213,10 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
208 case 1: /* status pending */ 213 case 1: /* status pending */
209 case 2: /* busy */ 214 case 2: /* busy */
210 return -EBUSY; 215 return -EBUSY;
211 default: /* device/path not operational */ 216 case 3: /* device/path not operational */
212 return cio_start_handle_notoper(sch, lpm); 217 return cio_start_handle_notoper(sch, lpm);
218 default:
219 return ccode;
213 } 220 }
214} 221}
215 222
@@ -229,7 +236,7 @@ cio_resume (struct subchannel *sch)
229 int ccode; 236 int ccode;
230 237
231 CIO_TRACE_EVENT (4, "resIO"); 238 CIO_TRACE_EVENT (4, "resIO");
232 CIO_TRACE_EVENT (4, sch->dev.bus_id); 239 CIO_TRACE_EVENT(4, dev_name(&sch->dev));
233 240
234 ccode = rsch (sch->schid); 241 ccode = rsch (sch->schid);
235 242
@@ -266,7 +273,7 @@ cio_halt(struct subchannel *sch)
266 return -ENODEV; 273 return -ENODEV;
267 274
268 CIO_TRACE_EVENT (2, "haltIO"); 275 CIO_TRACE_EVENT (2, "haltIO");
269 CIO_TRACE_EVENT (2, sch->dev.bus_id); 276 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
270 277
271 /* 278 /*
272 * Issue "Halt subchannel" and process condition code 279 * Issue "Halt subchannel" and process condition code
@@ -301,7 +308,7 @@ cio_clear(struct subchannel *sch)
301 return -ENODEV; 308 return -ENODEV;
302 309
303 CIO_TRACE_EVENT (2, "clearIO"); 310 CIO_TRACE_EVENT (2, "clearIO");
304 CIO_TRACE_EVENT (2, sch->dev.bus_id); 311 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
305 312
306 /* 313 /*
307 * Issue "Clear subchannel" and process condition code 314 * Issue "Clear subchannel" and process condition code
@@ -337,7 +344,7 @@ cio_cancel (struct subchannel *sch)
337 return -ENODEV; 344 return -ENODEV;
338 345
339 CIO_TRACE_EVENT (2, "cancelIO"); 346 CIO_TRACE_EVENT (2, "cancelIO");
340 CIO_TRACE_EVENT (2, sch->dev.bus_id); 347 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
341 348
342 ccode = xsch (sch->schid); 349 ccode = xsch (sch->schid);
343 350
@@ -401,7 +408,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
401 int ret; 408 int ret;
402 409
403 CIO_TRACE_EVENT (2, "ensch"); 410 CIO_TRACE_EVENT (2, "ensch");
404 CIO_TRACE_EVENT (2, sch->dev.bus_id); 411 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
405 412
406 if (sch_is_pseudo_sch(sch)) 413 if (sch_is_pseudo_sch(sch))
407 return -EINVAL; 414 return -EINVAL;
@@ -451,7 +458,7 @@ int cio_disable_subchannel(struct subchannel *sch)
451 int ret; 458 int ret;
452 459
453 CIO_TRACE_EVENT (2, "dissch"); 460 CIO_TRACE_EVENT (2, "dissch");
454 CIO_TRACE_EVENT (2, sch->dev.bus_id); 461 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
455 462
456 if (sch_is_pseudo_sch(sch)) 463 if (sch_is_pseudo_sch(sch))
457 return 0; 464 return 0;
@@ -568,8 +575,10 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
568 } 575 }
569 mutex_init(&sch->reg_mutex); 576 mutex_init(&sch->reg_mutex);
570 /* Set a name for the subchannel */ 577 /* Set a name for the subchannel */
571 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, 578 if (cio_is_console(schid))
572 schid.sch_no); 579 sch->dev.init_name = cio_get_console_sch_name(schid);
580 else
581 dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
573 582
574 /* 583 /*
575 * The first subchannel that is not-operational (ccode==3) 584 * The first subchannel that is not-operational (ccode==3)
@@ -674,6 +683,7 @@ do_IRQ (struct pt_regs *regs)
674 683
675#ifdef CONFIG_CCW_CONSOLE 684#ifdef CONFIG_CCW_CONSOLE
676static struct subchannel console_subchannel; 685static struct subchannel console_subchannel;
686static char console_sch_name[10] = "0.x.xxxx";
677static struct io_subchannel_private console_priv; 687static struct io_subchannel_private console_priv;
678static int console_subchannel_in_use; 688static int console_subchannel_in_use;
679 689
@@ -824,6 +834,12 @@ cio_get_console_subchannel(void)
824 return &console_subchannel; 834 return &console_subchannel;
825} 835}
826 836
837const char *cio_get_console_sch_name(struct subchannel_id schid)
838{
839 snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
840 return (const char *)console_sch_name;
841}
842
827#endif 843#endif
828static int 844static int
829__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 845__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
@@ -843,19 +859,6 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
843 return -EBUSY; /* uhm... */ 859 return -EBUSY; /* uhm... */
844} 860}
845 861
846/* we can't use the normal udelay here, since it enables external interrupts */
847
848static void udelay_reset(unsigned long usecs)
849{
850 uint64_t start_cc, end_cc;
851
852 asm volatile ("STCK %0" : "=m" (start_cc));
853 do {
854 cpu_relax();
855 asm volatile ("STCK %0" : "=m" (end_cc));
856 } while (((end_cc - start_cc)/4096) < usecs);
857}
858
859static int 862static int
860__clear_io_subchannel_easy(struct subchannel_id schid) 863__clear_io_subchannel_easy(struct subchannel_id schid)
861{ 864{
@@ -871,7 +874,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
871 if (schid_equal(&ti.schid, &schid)) 874 if (schid_equal(&ti.schid, &schid))
872 return 0; 875 return 0;
873 } 876 }
874 udelay_reset(100); 877 udelay_simple(100);
875 } 878 }
876 return -EBUSY; 879 return -EBUSY;
877} 880}
@@ -879,7 +882,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
879static void __clear_chsc_subchannel_easy(void) 882static void __clear_chsc_subchannel_easy(void)
880{ 883{
881 /* It seems we can only wait for a bit here :/ */ 884 /* It seems we can only wait for a bit here :/ */
882 udelay_reset(100); 885 udelay_simple(100);
883} 886}
884 887
885static int pgm_check_occured; 888static int pgm_check_occured;
@@ -889,7 +892,7 @@ static void cio_reset_pgm_check_handler(void)
889 pgm_check_occured = 1; 892 pgm_check_occured = 1;
890} 893}
891 894
892static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) 895static int stsch_reset(struct subchannel_id schid, struct schib *addr)
893{ 896{
894 int rc; 897 int rc;
895 898
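
The cio_tpi() hunks guard local_bh_disable()/_local_bh_enable() with an in_interrupt() check, since bottom halves may only be toggled from process context while cio_tpi() can now also be reached from interrupt context. A sketch of the pattern, with handle_one() standing in for the subchannel interrupt handler:

#include <linux/hardirq.h>
#include <linux/interrupt.h>

static void poll_once(void (*handle_one)(void))
{
        int irq_context = in_interrupt();

        if (!irq_context)              /* only legal from process context */
                local_bh_disable();
        irq_enter();
        handle_one();
        irq_exit();
        if (!irq_context)
                _local_bh_enable();
}
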
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 3b236d20e835..0fb24784e925 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -117,11 +117,15 @@ extern int cio_is_console(struct subchannel_id);
117extern struct subchannel *cio_get_console_subchannel(void); 117extern struct subchannel *cio_get_console_subchannel(void);
118extern spinlock_t * cio_get_console_lock(void); 118extern spinlock_t * cio_get_console_lock(void);
119extern void *cio_get_console_priv(void); 119extern void *cio_get_console_priv(void);
120extern const char *cio_get_console_sch_name(struct subchannel_id schid);
121extern const char *cio_get_console_cdev_name(struct subchannel *sch);
120#else 122#else
121#define cio_is_console(schid) 0 123#define cio_is_console(schid) 0
122#define cio_get_console_subchannel() NULL 124#define cio_get_console_subchannel() NULL
123#define cio_get_console_lock() NULL 125#define cio_get_console_lock() NULL
124#define cio_get_console_priv() NULL 126#define cio_get_console_priv() NULL
127#define cio_get_console_sch_name(schid) NULL
128#define cio_get_console_cdev_name(sch) NULL
125#endif 129#endif
126 130
127#endif 131#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 51489eff6b0b..76bbb1e74c29 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -633,6 +633,11 @@ channel_subsystem_release(struct device *dev)
633 633
634 css = to_css(dev); 634 css = to_css(dev);
635 mutex_destroy(&css->mutex); 635 mutex_destroy(&css->mutex);
636 if (css->pseudo_subchannel) {
637 /* Implies that it has been generated but never registered. */
638 css_subchannel_release(&css->pseudo_subchannel->dev);
639 css->pseudo_subchannel = NULL;
640 }
636 kfree(css); 641 kfree(css);
637} 642}
638 643
@@ -693,7 +698,7 @@ static int __init setup_css(int nr)
693 return -ENOMEM; 698 return -ENOMEM;
694 css->pseudo_subchannel->dev.parent = &css->device; 699 css->pseudo_subchannel->dev.parent = &css->device;
695 css->pseudo_subchannel->dev.release = css_subchannel_release; 700 css->pseudo_subchannel->dev.release = css_subchannel_release;
696 sprintf(css->pseudo_subchannel->dev.bus_id, "defunct"); 701 dev_set_name(&css->pseudo_subchannel->dev, "defunct");
697 ret = cio_create_sch_lock(css->pseudo_subchannel); 702 ret = cio_create_sch_lock(css->pseudo_subchannel);
698 if (ret) { 703 if (ret) {
699 kfree(css->pseudo_subchannel); 704 kfree(css->pseudo_subchannel);
@@ -702,7 +707,7 @@ static int __init setup_css(int nr)
702 mutex_init(&css->mutex); 707 mutex_init(&css->mutex);
703 css->valid = 1; 708 css->valid = 1;
704 css->cssid = nr; 709 css->cssid = nr;
705 sprintf(css->device.bus_id, "css%x", nr); 710 dev_set_name(&css->device, "css%x", nr);
706 css->device.release = channel_subsystem_release; 711 css->device.release = channel_subsystem_release;
707 tod_high = (u32) (get_clock() >> 32); 712 tod_high = (u32) (get_clock() >> 32);
708 css_generate_pgid(css, tod_high); 713 css_generate_pgid(css, tod_high);
@@ -785,11 +790,15 @@ init_channel_subsystem (void)
785 } 790 }
786 channel_subsystems[i] = css; 791 channel_subsystems[i] = css;
787 ret = setup_css(i); 792 ret = setup_css(i);
788 if (ret) 793 if (ret) {
789 goto out_free; 794 kfree(channel_subsystems[i]);
795 goto out_unregister;
796 }
790 ret = device_register(&css->device); 797 ret = device_register(&css->device);
791 if (ret) 798 if (ret) {
792 goto out_free_all; 799 put_device(&css->device);
800 goto out_unregister;
801 }
793 if (css_chsc_characteristics.secm) { 802 if (css_chsc_characteristics.secm) {
794 ret = device_create_file(&css->device, 803 ret = device_create_file(&css->device,
795 &dev_attr_cm_enable); 804 &dev_attr_cm_enable);
@@ -802,7 +811,7 @@ init_channel_subsystem (void)
802 } 811 }
803 ret = register_reboot_notifier(&css_reboot_notifier); 812 ret = register_reboot_notifier(&css_reboot_notifier);
804 if (ret) 813 if (ret)
805 goto out_pseudo; 814 goto out_unregister;
806 css_init_done = 1; 815 css_init_done = 1;
807 816
808 /* Enable default isc for I/O subchannels. */ 817 /* Enable default isc for I/O subchannels. */
@@ -810,18 +819,12 @@ init_channel_subsystem (void)
810 819
811 for_each_subchannel(__init_channel_subsystem, NULL); 820 for_each_subchannel(__init_channel_subsystem, NULL);
812 return 0; 821 return 0;
813out_pseudo:
814 device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
815out_file: 822out_file:
816 device_remove_file(&channel_subsystems[i]->device, 823 if (css_chsc_characteristics.secm)
817 &dev_attr_cm_enable); 824 device_remove_file(&channel_subsystems[i]->device,
825 &dev_attr_cm_enable);
818out_device: 826out_device:
819 device_unregister(&channel_subsystems[i]->device); 827 device_unregister(&channel_subsystems[i]->device);
820out_free_all:
821 kfree(channel_subsystems[i]->pseudo_subchannel->lock);
822 kfree(channel_subsystems[i]->pseudo_subchannel);
823out_free:
824 kfree(channel_subsystems[i]);
825out_unregister: 828out_unregister:
826 while (i > 0) { 829 while (i > 0) {
827 struct channel_subsystem *css; 830 struct channel_subsystem *css;
@@ -829,6 +832,7 @@ out_unregister:
829 i--; 832 i--;
830 css = channel_subsystems[i]; 833 css = channel_subsystems[i];
831 device_unregister(&css->pseudo_subchannel->dev); 834 device_unregister(&css->pseudo_subchannel->dev);
835 css->pseudo_subchannel = NULL;
832 if (css_chsc_characteristics.secm) 836 if (css_chsc_characteristics.secm)
833 device_remove_file(&css->device, 837 device_remove_file(&css->device,
834 &dev_attr_cm_enable); 838 &dev_attr_cm_enable);
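
The init_channel_subsystem() error paths above follow the driver-core ownership rule: once device_register() has been called, even unsuccessfully, the structure belongs to the core and must be released with put_device() (which ends in the release callback), never kfree()'d directly. A generic sketch of that rule, with the demo_* names purely illustrative:

#include <linux/device.h>
#include <linux/slab.h>

static void demo_release(struct device *dev)
{
        kfree(dev);
}

static struct device *demo_new(void)
{
        struct device *dev;
        int ret;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;
        dev->release = demo_release;
        dev_set_name(dev, "demo0");
        ret = device_register(dev);
        if (ret) {
                put_device(dev);        /* frees via demo_release() */
                return NULL;
        }
        return dev;
}
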
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 28221030b886..4e78c82194b4 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -31,6 +31,7 @@
31#include "device.h" 31#include "device.h"
32#include "ioasm.h" 32#include "ioasm.h"
33#include "io_sch.h" 33#include "io_sch.h"
34#include "blacklist.h"
34 35
35static struct timer_list recovery_timer; 36static struct timer_list recovery_timer;
36static DEFINE_SPINLOCK(recovery_lock); 37static DEFINE_SPINLOCK(recovery_lock);
@@ -296,36 +297,33 @@ static void ccw_device_unregister(struct ccw_device *cdev)
296 device_del(&cdev->dev); 297 device_del(&cdev->dev);
297} 298}
298 299
299static void ccw_device_remove_orphan_cb(struct device *dev) 300static void ccw_device_remove_orphan_cb(struct work_struct *work)
300{ 301{
301 struct ccw_device *cdev = to_ccwdev(dev); 302 struct ccw_device_private *priv;
303 struct ccw_device *cdev;
302 304
305 priv = container_of(work, struct ccw_device_private, kick_work);
306 cdev = priv->cdev;
303 ccw_device_unregister(cdev); 307 ccw_device_unregister(cdev);
304 put_device(&cdev->dev); 308 put_device(&cdev->dev);
309 /* Release cdev reference for workqueue processing. */
310 put_device(&cdev->dev);
305} 311}
306 312
307static void ccw_device_remove_sch_cb(struct device *dev) 313static void ccw_device_call_sch_unregister(struct work_struct *work);
308{
309 struct subchannel *sch;
310
311 sch = to_subchannel(dev);
312 css_sch_device_unregister(sch);
313 /* Reset intparm to zeroes. */
314 sch->schib.pmcw.intparm = 0;
315 cio_modify(sch);
316 put_device(&sch->dev);
317}
318 314
319static void 315static void
320ccw_device_remove_disconnected(struct ccw_device *cdev) 316ccw_device_remove_disconnected(struct ccw_device *cdev)
321{ 317{
322 unsigned long flags; 318 unsigned long flags;
323 int rc;
324 319
325 /* 320 /*
326 * Forced offline in disconnected state means 321 * Forced offline in disconnected state means
327 * 'throw away device'. 322 * 'throw away device'.
328 */ 323 */
324 /* Get cdev reference for workqueue processing. */
325 if (!get_device(&cdev->dev))
326 return;
329 if (ccw_device_is_orphan(cdev)) { 327 if (ccw_device_is_orphan(cdev)) {
330 /* 328 /*
331 * Deregister ccw device. 329 * Deregister ccw device.
@@ -335,23 +333,13 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
335 spin_lock_irqsave(cdev->ccwlock, flags); 333 spin_lock_irqsave(cdev->ccwlock, flags);
336 cdev->private->state = DEV_STATE_NOT_OPER; 334 cdev->private->state = DEV_STATE_NOT_OPER;
337 spin_unlock_irqrestore(cdev->ccwlock, flags); 335 spin_unlock_irqrestore(cdev->ccwlock, flags);
338 rc = device_schedule_callback(&cdev->dev, 336 PREPARE_WORK(&cdev->private->kick_work,
339 ccw_device_remove_orphan_cb); 337 ccw_device_remove_orphan_cb);
340 if (rc) 338 } else
341 CIO_MSG_EVENT(0, "Couldn't unregister orphan " 339 /* Deregister subchannel, which will kill the ccw device. */
342 "0.%x.%04x\n", 340 PREPARE_WORK(&cdev->private->kick_work,
343 cdev->private->dev_id.ssid, 341 ccw_device_call_sch_unregister);
344 cdev->private->dev_id.devno); 342 queue_work(slow_path_wq, &cdev->private->kick_work);
345 return;
346 }
347 /* Deregister subchannel, which will kill the ccw device. */
348 rc = device_schedule_callback(cdev->dev.parent,
349 ccw_device_remove_sch_cb);
350 if (rc)
351 CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
352 "0.%x.%04x\n",
353 cdev->private->dev_id.ssid,
354 cdev->private->dev_id.devno);
355} 343}
356 344
357/** 345/**
@@ -970,12 +958,17 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
970 958
971 priv = container_of(work, struct ccw_device_private, kick_work); 959 priv = container_of(work, struct ccw_device_private, kick_work);
972 cdev = priv->cdev; 960 cdev = priv->cdev;
961 /* Get subchannel reference for local processing. */
962 if (!get_device(cdev->dev.parent))
963 return;
973 sch = to_subchannel(cdev->dev.parent); 964 sch = to_subchannel(cdev->dev.parent);
974 css_sch_device_unregister(sch); 965 css_sch_device_unregister(sch);
975 /* Reset intparm to zeroes. */ 966 /* Reset intparm to zeroes. */
976 sch->schib.pmcw.intparm = 0; 967 sch->schib.pmcw.intparm = 0;
977 cio_modify(sch); 968 cio_modify(sch);
969 /* Release cdev reference for workqueue processing.*/
978 put_device(&cdev->dev); 970 put_device(&cdev->dev);
971 /* Release subchannel reference for local processing. */
979 put_device(&sch->dev); 972 put_device(&sch->dev);
980} 973}
981 974
@@ -1001,6 +994,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1001 PREPARE_WORK(&cdev->private->kick_work, 994 PREPARE_WORK(&cdev->private->kick_work,
1002 ccw_device_call_sch_unregister); 995 ccw_device_call_sch_unregister);
1003 queue_work(slow_path_wq, &cdev->private->kick_work); 996 queue_work(slow_path_wq, &cdev->private->kick_work);
997 /* Release subchannel reference for asynchronous recognition. */
998 put_device(&sch->dev);
1004 if (atomic_dec_and_test(&ccw_device_init_count)) 999 if (atomic_dec_and_test(&ccw_device_init_count))
1005 wake_up(&ccw_device_init_wq); 1000 wake_up(&ccw_device_init_wq);
1006 break; 1001 break;
@@ -1040,8 +1035,11 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1040 init_timer(&priv->timer); 1035 init_timer(&priv->timer);
1041 1036
1042 /* Set an initial name for the device. */ 1037 /* Set an initial name for the device. */
1043 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", 1038 if (cio_is_console(sch->schid))
1044 sch->schid.ssid, sch->schib.pmcw.dev); 1039 cdev->dev.init_name = cio_get_console_cdev_name(sch);
1040 else
1041 dev_set_name(&cdev->dev, "0.%x.%04x",
1042 sch->schid.ssid, sch->schib.pmcw.dev);
1045 1043
1046 /* Increase counter of devices currently in recognition. */ 1044 /* Increase counter of devices currently in recognition. */
1047 atomic_inc(&ccw_device_init_count); 1045 atomic_inc(&ccw_device_init_count);
@@ -1106,7 +1104,7 @@ static void io_subchannel_irq(struct subchannel *sch)
1106 cdev = sch_get_cdev(sch); 1104 cdev = sch_get_cdev(sch);
1107 1105
1108 CIO_TRACE_EVENT(3, "IRQ"); 1106 CIO_TRACE_EVENT(3, "IRQ");
1109 CIO_TRACE_EVENT(3, sch->dev.bus_id); 1107 CIO_TRACE_EVENT(3, dev_name(&sch->dev));
1110 if (cdev) 1108 if (cdev)
1111 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1109 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1112} 1110}
@@ -1476,6 +1474,45 @@ static void ccw_device_schedule_recovery(void)
1476 spin_unlock_irqrestore(&recovery_lock, flags); 1474 spin_unlock_irqrestore(&recovery_lock, flags);
1477} 1475}
1478 1476
1477static int purge_fn(struct device *dev, void *data)
1478{
1479 struct ccw_device *cdev = to_ccwdev(dev);
1480 struct ccw_device_private *priv = cdev->private;
1481 int unreg;
1482
1483 spin_lock_irq(cdev->ccwlock);
1484 unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
1485 (priv->state == DEV_STATE_OFFLINE);
1486 spin_unlock_irq(cdev->ccwlock);
1487 if (!unreg)
1488 goto out;
1489 if (!get_device(&cdev->dev))
1490 goto out;
1491 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
1492 priv->dev_id.devno);
1493 PREPARE_WORK(&cdev->private->kick_work, ccw_device_call_sch_unregister);
1494 queue_work(slow_path_wq, &cdev->private->kick_work);
1495
1496out:
1497 /* Abort loop in case of pending signal. */
1498 if (signal_pending(current))
1499 return -EINTR;
1500
1501 return 0;
1502}
1503
1504/**
1505 * ccw_purge_blacklisted - purge unused, blacklisted devices
1506 *
1507 * Unregister all ccw devices that are offline and on the blacklist.
1508 */
1509int ccw_purge_blacklisted(void)
1510{
1511 CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1512 bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1513 return 0;
1514}
1515
1479static void device_set_disconnected(struct ccw_device *cdev) 1516static void device_set_disconnected(struct ccw_device *cdev)
1480{ 1517{
1481 if (!cdev) 1518 if (!cdev)
@@ -1492,7 +1529,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
1492 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1529 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1493 1530
1494 CIO_TRACE_EVENT(2, "notoper"); 1531 CIO_TRACE_EVENT(2, "notoper");
1495 CIO_TRACE_EVENT(2, sch->dev.bus_id); 1532 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
1496 ccw_device_set_timeout(cdev, 0); 1533 ccw_device_set_timeout(cdev, 0);
1497 cio_disable_subchannel(sch); 1534 cio_disable_subchannel(sch);
1498 cdev->private->state = DEV_STATE_NOT_OPER; 1535 cdev->private->state = DEV_STATE_NOT_OPER;
@@ -1591,6 +1628,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1591 1628
1592#ifdef CONFIG_CCW_CONSOLE 1629#ifdef CONFIG_CCW_CONSOLE
1593static struct ccw_device console_cdev; 1630static struct ccw_device console_cdev;
1631static char console_cdev_name[10] = "0.x.xxxx";
1594static struct ccw_device_private console_private; 1632static struct ccw_device_private console_private;
1595static int console_cdev_in_use; 1633static int console_cdev_in_use;
1596 1634
@@ -1661,6 +1699,14 @@ ccw_device_probe_console(void)
1661 console_cdev.online = 1; 1699 console_cdev.online = 1;
1662 return &console_cdev; 1700 return &console_cdev;
1663} 1701}
1702
1703
1704const char *cio_get_console_cdev_name(struct subchannel *sch)
1705{
1706 snprintf(console_cdev_name, 10, "0.%x.%04x",
1707 sch->schid.ssid, sch->schib.pmcw.dev);
1708 return (const char *)console_cdev_name;
1709}
1664#endif 1710#endif
1665 1711
1666/* 1712/*
@@ -1673,7 +1719,7 @@ __ccwdev_check_busid(struct device *dev, void *id)
1673 1719
1674 bus_id = id; 1720 bus_id = id;
1675 1721
1676 return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0); 1722 return (strncmp(bus_id, dev_name(dev), BUS_ID_SIZE) == 0);
1677} 1723}
1678 1724
1679 1725
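
The device.c hunks replace device_schedule_callback() with the driver's own kick_work item queued on slow_path_wq, taking an explicit device reference before queueing and dropping it in the work function. A sketch of the same pattern under stand-in names (demo_priv, demo_wq); kick_work is assumed to have been set up with INIT_WORK() when the private data was allocated, as the ccw code does:

#include <linux/device.h>
#include <linux/workqueue.h>

struct demo_priv {
        struct device *dev;
        struct work_struct kick_work;   /* initialized with INIT_WORK() */
};

static struct workqueue_struct *demo_wq;        /* created at module init */

static void demo_unregister_cb(struct work_struct *work)
{
        struct demo_priv *priv = container_of(work, struct demo_priv,
                                              kick_work);

        device_unregister(priv->dev);
        put_device(priv->dev);  /* reference taken before queue_work() */
}

static void demo_schedule_unregister(struct demo_priv *priv)
{
        if (!get_device(priv->dev))     /* device already going away */
                return;
        PREPARE_WORK(&priv->kick_work, demo_unregister_cb);
        queue_work(demo_wq, &priv->kick_work);
}
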
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 6f5c3f2b3587..104ed669db43 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -86,6 +86,7 @@ int ccw_device_is_orphan(struct ccw_device *);
86int ccw_device_recognition(struct ccw_device *); 86int ccw_device_recognition(struct ccw_device *);
87int ccw_device_online(struct ccw_device *); 87int ccw_device_online(struct ccw_device *);
88int ccw_device_offline(struct ccw_device *); 88int ccw_device_offline(struct ccw_device *);
89int ccw_purge_blacklisted(void);
89 90
90/* Function prototypes for device status and basic sense stuff. */ 91/* Function prototypes for device status and basic sense stuff. */
91void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 92void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 550508df952b..10bc03940fb3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -52,8 +52,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
52 printk(KERN_WARNING "cio: orb:\n"); 52 printk(KERN_WARNING "cio: orb:\n");
53 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 53 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
54 orb, sizeof(*orb), 0); 54 orb, sizeof(*orb), 0);
55 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); 55 printk(KERN_WARNING "cio: ccw device bus id: %s\n",
56 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); 56 dev_name(&cdev->dev));
57 printk(KERN_WARNING "cio: subchannel bus id: %s\n",
58 dev_name(&sch->dev));
57 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " 59 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
58 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); 60 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
59 61
@@ -658,6 +660,13 @@ ccw_device_offline(struct ccw_device *cdev)
658{ 660{
659 struct subchannel *sch; 661 struct subchannel *sch;
660 662
663 /* Allow ccw_device_offline while disconnected. */
664 if (cdev->private->state == DEV_STATE_DISCONNECTED ||
665 cdev->private->state == DEV_STATE_NOT_OPER) {
666 cdev->private->flags.donotify = 0;
667 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
668 return 0;
669 }
661 if (ccw_device_is_orphan(cdev)) { 670 if (ccw_device_is_orphan(cdev)) {
662 ccw_device_done(cdev, DEV_STATE_OFFLINE); 671 ccw_device_done(cdev, DEV_STATE_OFFLINE);
663 return 0; 672 return 0;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index ee1a28310fbb..eabcc42d63df 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -498,7 +498,7 @@ ccw_device_stlck(struct ccw_device *cdev)
498 sch = to_subchannel(cdev->dev.parent); 498 sch = to_subchannel(cdev->dev.parent);
499 499
500 CIO_TRACE_EVENT(2, "stl lock"); 500 CIO_TRACE_EVENT(2, "stl lock");
501 CIO_TRACE_EVENT(2, cdev->dev.bus_id); 501 CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
502 502
503 buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); 503 buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
504 if (!buf) 504 if (!buf)
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 3f8f1cf69c76..c4f3e7c9a854 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -123,7 +123,7 @@ struct ccw_device_private {
123 void *cmb_wait; /* deferred cmb enable/disable */ 123 void *cmb_wait; /* deferred cmb enable/disable */
124}; 124};
125 125
126static inline int ssch(struct subchannel_id schid, volatile union orb *addr) 126static inline int ssch(struct subchannel_id schid, union orb *addr)
127{ 127{
128 register struct subchannel_id reg1 asm("1") = schid; 128 register struct subchannel_id reg1 asm("1") = schid;
129 int ccode = -EIO; 129 int ccode = -EIO;
@@ -134,7 +134,9 @@ static inline int ssch(struct subchannel_id schid, volatile union orb *addr)
134 " srl %0,28\n" 134 " srl %0,28\n"
135 "1:\n" 135 "1:\n"
136 EX_TABLE(0b, 1b) 136 EX_TABLE(0b, 1b)
137 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 137 : "+d" (ccode)
138 : "d" (reg1), "a" (addr), "m" (*addr)
139 : "cc", "memory");
138 return ccode; 140 return ccode;
139} 141}
140 142
@@ -147,7 +149,9 @@ static inline int rsch(struct subchannel_id schid)
147 " rsch\n" 149 " rsch\n"
148 " ipm %0\n" 150 " ipm %0\n"
149 " srl %0,28" 151 " srl %0,28"
150 : "=d" (ccode) : "d" (reg1) : "cc"); 152 : "=d" (ccode)
153 : "d" (reg1)
154 : "cc", "memory");
151 return ccode; 155 return ccode;
152} 156}
153 157
@@ -160,7 +164,9 @@ static inline int csch(struct subchannel_id schid)
160 " csch\n" 164 " csch\n"
161 " ipm %0\n" 165 " ipm %0\n"
162 " srl %0,28" 166 " srl %0,28"
163 : "=d" (ccode) : "d" (reg1) : "cc"); 167 : "=d" (ccode)
168 : "d" (reg1)
169 : "cc");
164 return ccode; 170 return ccode;
165} 171}
166 172
@@ -173,7 +179,9 @@ static inline int hsch(struct subchannel_id schid)
173 " hsch\n" 179 " hsch\n"
174 " ipm %0\n" 180 " ipm %0\n"
175 " srl %0,28" 181 " srl %0,28"
176 : "=d" (ccode) : "d" (reg1) : "cc"); 182 : "=d" (ccode)
183 : "d" (reg1)
184 : "cc");
177 return ccode; 185 return ccode;
178} 186}
179 187
@@ -186,7 +194,9 @@ static inline int xsch(struct subchannel_id schid)
186 " .insn rre,0xb2760000,%1,0\n" 194 " .insn rre,0xb2760000,%1,0\n"
187 " ipm %0\n" 195 " ipm %0\n"
188 " srl %0,28" 196 " srl %0,28"
189 : "=d" (ccode) : "d" (reg1) : "cc"); 197 : "=d" (ccode)
198 : "d" (reg1)
199 : "cc");
190 return ccode; 200 return ccode;
191} 201}
192 202
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 9fa2ac13ac85..759262792633 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -23,38 +23,39 @@ struct tpi_info {
23 * Some S390 specific IO instructions as inline 23 * Some S390 specific IO instructions as inline
24 */ 24 */
25 25
26static inline int stsch(struct subchannel_id schid, 26static inline int stsch(struct subchannel_id schid, struct schib *addr)
27 volatile struct schib *addr)
28{ 27{
29 register struct subchannel_id reg1 asm ("1") = schid; 28 register struct subchannel_id reg1 asm ("1") = schid;
30 int ccode; 29 int ccode;
31 30
32 asm volatile( 31 asm volatile(
33 " stsch 0(%2)\n" 32 " stsch 0(%3)\n"
34 " ipm %0\n" 33 " ipm %0\n"
35 " srl %0,28" 34 " srl %0,28"
36 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 35 : "=d" (ccode), "=m" (*addr)
36 : "d" (reg1), "a" (addr)
37 : "cc");
37 return ccode; 38 return ccode;
38} 39}
39 40
40static inline int stsch_err(struct subchannel_id schid, 41static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
41 volatile struct schib *addr)
42{ 42{
43 register struct subchannel_id reg1 asm ("1") = schid; 43 register struct subchannel_id reg1 asm ("1") = schid;
44 int ccode = -EIO; 44 int ccode = -EIO;
45 45
46 asm volatile( 46 asm volatile(
47 " stsch 0(%2)\n" 47 " stsch 0(%3)\n"
48 "0: ipm %0\n" 48 "0: ipm %0\n"
49 " srl %0,28\n" 49 " srl %0,28\n"
50 "1:\n" 50 "1:\n"
51 EX_TABLE(0b,1b) 51 EX_TABLE(0b,1b)
52 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 52 : "+d" (ccode), "=m" (*addr)
53 : "d" (reg1), "a" (addr)
54 : "cc");
53 return ccode; 55 return ccode;
54} 56}
55 57
56static inline int msch(struct subchannel_id schid, 58static inline int msch(struct subchannel_id schid, struct schib *addr)
57 volatile struct schib *addr)
58{ 59{
59 register struct subchannel_id reg1 asm ("1") = schid; 60 register struct subchannel_id reg1 asm ("1") = schid;
60 int ccode; 61 int ccode;
@@ -63,12 +64,13 @@ static inline int msch(struct subchannel_id schid,
63 " msch 0(%2)\n" 64 " msch 0(%2)\n"
64 " ipm %0\n" 65 " ipm %0\n"
65 " srl %0,28" 66 " srl %0,28"
66 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 67 : "=d" (ccode)
68 : "d" (reg1), "a" (addr), "m" (*addr)
69 : "cc");
67 return ccode; 70 return ccode;
68} 71}
69 72
70static inline int msch_err(struct subchannel_id schid, 73static inline int msch_err(struct subchannel_id schid, struct schib *addr)
71 volatile struct schib *addr)
72{ 74{
73 register struct subchannel_id reg1 asm ("1") = schid; 75 register struct subchannel_id reg1 asm ("1") = schid;
74 int ccode = -EIO; 76 int ccode = -EIO;
@@ -79,33 +81,38 @@ static inline int msch_err(struct subchannel_id schid,
79 " srl %0,28\n" 81 " srl %0,28\n"
80 "1:\n" 82 "1:\n"
81 EX_TABLE(0b,1b) 83 EX_TABLE(0b,1b)
82 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 84 : "+d" (ccode)
85 : "d" (reg1), "a" (addr), "m" (*addr)
86 : "cc");
83 return ccode; 87 return ccode;
84} 88}
85 89
86static inline int tsch(struct subchannel_id schid, 90static inline int tsch(struct subchannel_id schid, struct irb *addr)
87 volatile struct irb *addr)
88{ 91{
89 register struct subchannel_id reg1 asm ("1") = schid; 92 register struct subchannel_id reg1 asm ("1") = schid;
90 int ccode; 93 int ccode;
91 94
92 asm volatile( 95 asm volatile(
93 " tsch 0(%2)\n" 96 " tsch 0(%3)\n"
94 " ipm %0\n" 97 " ipm %0\n"
95 " srl %0,28" 98 " srl %0,28"
96 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 99 : "=d" (ccode), "=m" (*addr)
100 : "d" (reg1), "a" (addr)
101 : "cc");
97 return ccode; 102 return ccode;
98} 103}
99 104
100static inline int tpi( volatile struct tpi_info *addr) 105static inline int tpi(struct tpi_info *addr)
101{ 106{
102 int ccode; 107 int ccode;
103 108
104 asm volatile( 109 asm volatile(
105 " tpi 0(%1)\n" 110 " tpi 0(%2)\n"
106 " ipm %0\n" 111 " ipm %0\n"
107 " srl %0,28" 112 " srl %0,28"
108 : "=d" (ccode) : "a" (addr), "m" (*addr) : "cc"); 113 : "=d" (ccode), "=m" (*addr)
114 : "a" (addr)
115 : "cc");
109 return ccode; 116 return ccode;
110} 117}
111 118
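
The io_sch.h and ioasm.h hunks tighten the inline-assembly constraints: instructions that store into the operand block (stsch, tsch, tpi) now declare an "=m" output instead of a read-only "m" input, and ssch() gains a "memory" clobber because the channel subsystem reads memory (the ORB and its CCW chain) that the compiler cannot see through the operands. A portable illustration with empty asm bodies standing in for the s390 instructions:

struct blob { int data[4]; };

/* "=m" tells gcc the asm may write *addr, so stale cached values are not
 * reused and the result is not optimized away */
static inline void fake_store(struct blob *addr)
{
        asm volatile("" : "=m" (*addr));
}

/* a "memory" clobber forces all pending stores to be visible before the
 * asm runs, matching an instruction that reads memory behind gcc's back */
static inline void fake_launch(const struct blob *addr)
{
        asm volatile("" : : "r" (addr), "m" (*addr) : "memory");
}
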
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index c1a70985abfa..e3ea1d5f2810 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -16,6 +16,14 @@
16#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */ 16#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */
17#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ 17#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
18 18
19/*
20 * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
21 * till next initiative to give transmitted skbs back to the stack is too long.
22 * Therefore polling is started in case of multicast queue is filled more
23 * than 50 percent.
24 */
25#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */
26
19enum qdio_irq_states { 27enum qdio_irq_states {
20 QDIO_IRQ_STATE_INACTIVE, 28 QDIO_IRQ_STATE_INACTIVE,
21 QDIO_IRQ_STATE_ESTABLISHED, 29 QDIO_IRQ_STATE_ESTABLISHED,
@@ -195,6 +203,9 @@ struct qdio_output_q {
195 /* PCIs are enabled for the queue */ 203 /* PCIs are enabled for the queue */
196 int pci_out_enabled; 204 int pci_out_enabled;
197 205
206 /* IQDIO: output multiple buffers (enhanced SIGA) */
207 int use_enh_siga;
208
198 /* timer to check for more outbound work */ 209 /* timer to check for more outbound work */
199 struct timer_list timer; 210 struct timer_list timer;
200}; 211};
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 337aa3087a78..b5390821434f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -155,7 +155,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
155static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name) 155static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
156{ 156{
157 memset(name, 0, sizeof(name)); 157 memset(name, 0, sizeof(name));
158 sprintf(name, "%s", cdev->dev.bus_id); 158 sprintf(name, "%s", dev_name(&cdev->dev));
159 if (q->is_input_q) 159 if (q->is_input_q)
160 sprintf(name + strlen(name), "_input"); 160 sprintf(name + strlen(name), "_input");
161 else 161 else
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e6eabc853422..a50682d2a0fa 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -316,6 +316,9 @@ static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
316 unsigned int fc = 0; 316 unsigned int fc = 0;
317 unsigned long schid; 317 unsigned long schid;
318 318
319 if (q->u.out.use_enh_siga) {
320 fc = 3;
321 }
319 if (!is_qebsm(q)) 322 if (!is_qebsm(q))
320 schid = *((u32 *)&q->irq_ptr->schid); 323 schid = *((u32 *)&q->irq_ptr->schid);
321 else { 324 else {
@@ -851,6 +854,12 @@ static void __qdio_outbound_processing(struct qdio_q *q)
851 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) 854 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
852 return; 855 return;
853 856
857 if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
858 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) {
859 tasklet_schedule(&q->tasklet);
860 return;
861 }
862
854 if (q->u.out.pci_out_enabled) 863 if (q->u.out.pci_out_enabled)
855 return; 864 return;
856 865
@@ -956,7 +965,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
956 char dbf_text[15]; 965 char dbf_text[15];
957 966
958 QDIO_DBF_TEXT2(1, trace, "ick2"); 967 QDIO_DBF_TEXT2(1, trace, "ick2");
959 sprintf(dbf_text, "%s", cdev->dev.bus_id); 968 sprintf(dbf_text, "%s", dev_name(&cdev->dev));
960 QDIO_DBF_TEXT2(1, trace, dbf_text); 969 QDIO_DBF_TEXT2(1, trace, dbf_text);
961 QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); 970 QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
962 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); 971 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
@@ -1443,6 +1452,8 @@ int qdio_establish(struct qdio_initialize *init_data)
1443 } 1452 }
1444 1453
1445 qdio_setup_ssqd_info(irq_ptr); 1454 qdio_setup_ssqd_info(irq_ptr);
1455 sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc);
1456 QDIO_DBF_TEXT2(0, setup, dbf_text);
1446 sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); 1457 sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
1447 QDIO_DBF_TEXT2(0, setup, dbf_text); 1458 QDIO_DBF_TEXT2(0, setup, dbf_text);
1448 1459
@@ -1615,12 +1626,21 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1615 if (multicast_outbound(q)) 1626 if (multicast_outbound(q))
1616 qdio_kick_outbound_q(q); 1627 qdio_kick_outbound_q(q);
1617 else 1628 else
1618 /* 1629 if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
1619 * One siga-w per buffer required for unicast 1630 (count > 1) &&
1620 * HiperSockets. 1631 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
1621 */ 1632 /* exploit enhanced SIGA */
1622 while (count--) 1633 q->u.out.use_enh_siga = 1;
1623 qdio_kick_outbound_q(q); 1634 qdio_kick_outbound_q(q);
1635 } else {
1636 /*
1637 * One siga-w per buffer required for unicast
1638 * HiperSockets.
1639 */
1640 q->u.out.use_enh_siga = 0;
1641 while (count--)
1642 qdio_kick_outbound_q(q);
1643 }
1624 goto out; 1644 goto out;
1625 } 1645 }
1626 1646
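
The handle_outbound() hunk adds the enhanced-SIGA path for HiperSockets: if the adapter reports multiple-write compatibility (mmwc) and the burst fits within it, a single SIGA with function code 3 moves all buffers; otherwise the old one-SIGA-w-per-buffer loop is kept. A sketch of that decision, with kick_queue() standing in for qdio_kick_outbound_q():

/* would issue SIGA-w (or enhanced SIGA) for the queue in the real driver */
static void kick_queue(void)
{
}

static void send_burst(unsigned int mmwc, int count, int *use_enh_siga)
{
        if (mmwc > 1 && count > 1 && count <= mmwc) {
                *use_enh_siga = 1;      /* one enhanced SIGA for the burst */
                kick_queue();
                return;
        }
        *use_enh_siga = 0;              /* classic: one SIGA-w per buffer */
        while (count--)
                kick_queue();
}
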
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 1679e2f91c94..a0b6b46e7466 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -447,51 +447,36 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
447{ 447{
448 char s[80]; 448 char s[80];
449 449
450 sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no); 450 sprintf(s, "qdio: %s ", dev_name(&cdev->dev));
451
452 switch (irq_ptr->qib.qfmt) { 451 switch (irq_ptr->qib.qfmt) {
453 case QDIO_QETH_QFMT: 452 case QDIO_QETH_QFMT:
454 sprintf(s + strlen(s), "OSADE "); 453 sprintf(s + strlen(s), "OSA ");
455 break; 454 break;
456 case QDIO_ZFCP_QFMT: 455 case QDIO_ZFCP_QFMT:
457 sprintf(s + strlen(s), "ZFCP "); 456 sprintf(s + strlen(s), "ZFCP ");
458 break; 457 break;
459 case QDIO_IQDIO_QFMT: 458 case QDIO_IQDIO_QFMT:
460 sprintf(s + strlen(s), "HiperSockets "); 459 sprintf(s + strlen(s), "HS ");
461 break; 460 break;
462 } 461 }
463 sprintf(s + strlen(s), "using: "); 462 sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no);
464 463 sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr));
465 if (!is_thinint_irq(irq_ptr)) 464 sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0);
466 sprintf(s + strlen(s), "no"); 465 sprintf(s + strlen(s), "PCI:%d ",
467 sprintf(s + strlen(s), "AdapterInterrupts "); 466 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0);
468 if (!(irq_ptr->sch_token != 0)) 467 sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd);
469 sprintf(s + strlen(s), "no"); 468 sprintf(s + strlen(s), "SIGA:");
470 sprintf(s + strlen(s), "QEBSM "); 469 sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " ");
471 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 470 sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " ");
472 sprintf(s + strlen(s), "no"); 471 sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " ");
473 sprintf(s + strlen(s), "OutboundPCI "); 472 sprintf(s + strlen(s), "%s",
474 if (!css_general_characteristics.aif_tdd) 473 (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ");
475 sprintf(s + strlen(s), "no"); 474 sprintf(s + strlen(s), "%s",
476 sprintf(s + strlen(s), "TDD\n"); 475 (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ");
477 printk(KERN_INFO "qdio: %s", s); 476 sprintf(s + strlen(s), "%s",
478 477 (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
479 memset(s, 0, sizeof(s));
480 sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
481 if (irq_ptr->siga_flag.input)
482 sprintf(s + strlen(s), "Read ");
483 if (irq_ptr->siga_flag.output)
484 sprintf(s + strlen(s), "Write ");
485 if (irq_ptr->siga_flag.sync)
486 sprintf(s + strlen(s), "Sync ");
487 if (!irq_ptr->siga_flag.no_sync_ti)
488 sprintf(s + strlen(s), "SyncAI ");
489 if (!irq_ptr->siga_flag.no_sync_out_ti)
490 sprintf(s + strlen(s), "SyncOutAI ");
491 if (!irq_ptr->siga_flag.no_sync_out_pci)
492 sprintf(s + strlen(s), "SyncOutPCI");
493 sprintf(s + strlen(s), "\n"); 478 sprintf(s + strlen(s), "\n");
494 printk(KERN_INFO "qdio: %s", s); 479 printk(KERN_INFO "%s", s);
495} 480}
496 481
497int __init qdio_setup_init(void) 482int __init qdio_setup_init(void)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 6f02f1e674d4..e3fe6838293a 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -892,8 +892,8 @@ static void ap_scan_bus(struct work_struct *unused)
892 892
893 ap_dev->device.bus = &ap_bus_type; 893 ap_dev->device.bus = &ap_bus_type;
894 ap_dev->device.parent = ap_root_device; 894 ap_dev->device.parent = ap_root_device;
895 snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x", 895 dev_set_name(&ap_dev->device, "card%02x",
896 AP_QID_DEVICE(ap_dev->qid)); 896 AP_QID_DEVICE(ap_dev->qid));
897 ap_dev->device.release = ap_device_release; 897 ap_dev->device.release = ap_device_release;
898 rc = device_register(&ap_dev->device); 898 rc = device_register(&ap_dev->device);
899 if (rc) { 899 if (rc) {
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 292b60da6dc7..ff4a6931bb8e 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -24,6 +24,7 @@
24#include <asm/kvm_virtio.h> 24#include <asm/kvm_virtio.h>
25#include <asm/setup.h> 25#include <asm/setup.h>
26#include <asm/s390_ext.h> 26#include <asm/s390_ext.h>
27#include <asm/s390_rdev.h>
27 28
28#define VIRTIO_SUBCODE_64 0x0D00 29#define VIRTIO_SUBCODE_64 0x0D00
29 30
@@ -241,10 +242,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
241 * The root device for the kvm virtio devices. 242 * The root device for the kvm virtio devices.
242 * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2. 243 * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
243 */ 244 */
244static struct device kvm_root = { 245static struct device *kvm_root;
245 .parent = NULL,
246 .bus_id = "kvm_s390",
247};
248 246
249/* 247/*
250 * adds a new device and register it with virtio 248 * adds a new device and register it with virtio
@@ -261,7 +259,7 @@ static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
261 return; 259 return;
262 } 260 }
263 261
264 kdev->vdev.dev.parent = &kvm_root; 262 kdev->vdev.dev.parent = kvm_root;
265 kdev->vdev.id.device = d->type; 263 kdev->vdev.id.device = d->type;
266 kdev->vdev.config = &kvm_vq_configspace_ops; 264 kdev->vdev.config = &kvm_vq_configspace_ops;
267 kdev->desc = d; 265 kdev->desc = d;
@@ -317,15 +315,16 @@ static int __init kvm_devices_init(void)
317 if (!MACHINE_IS_KVM) 315 if (!MACHINE_IS_KVM)
318 return -ENODEV; 316 return -ENODEV;
319 317
320 rc = device_register(&kvm_root); 318 kvm_root = s390_root_dev_register("kvm_s390");
321 if (rc) { 319 if (IS_ERR(kvm_root)) {
320 rc = PTR_ERR(kvm_root);
322 printk(KERN_ERR "Could not register kvm_s390 root device"); 321 printk(KERN_ERR "Could not register kvm_s390 root device");
323 return rc; 322 return rc;
324 } 323 }
325 324
326 rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE); 325 rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE);
327 if (rc) { 326 if (rc) {
328 device_unregister(&kvm_root); 327 s390_root_dev_unregister(kvm_root);
329 return rc; 328 return rc;
330 } 329 }
331 330
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index e10ac9ab2d44..f5e618562c5f 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -299,7 +299,7 @@ claw_probe(struct ccwgroup_device *cgdev)
299 probe_error(cgdev); 299 probe_error(cgdev);
300 put_device(&cgdev->dev); 300 put_device(&cgdev->dev);
301 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", 301 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
302 cgdev->cdev[0]->dev.bus_id,__func__,__LINE__); 302 dev_name(&cgdev->cdev[0]->dev), __func__, __LINE__);
303 CLAW_DBF_TEXT_(2, setup, "probex%d", rc); 303 CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
304 return rc; 304 return rc;
305 } 305 }
@@ -584,7 +584,7 @@ claw_irq_handler(struct ccw_device *cdev,
584 if (!cdev->dev.driver_data) { 584 if (!cdev->dev.driver_data) {
585 printk(KERN_WARNING "claw: unsolicited interrupt for device:" 585 printk(KERN_WARNING "claw: unsolicited interrupt for device:"
586 "%s received c-%02x d-%02x\n", 586 "%s received c-%02x d-%02x\n",
587 cdev->dev.bus_id, irb->scsw.cmd.cstat, 587 dev_name(&cdev->dev), irb->scsw.cmd.cstat,
588 irb->scsw.cmd.dstat); 588 irb->scsw.cmd.dstat);
589 CLAW_DBF_TEXT(2, trace, "badirq"); 589 CLAW_DBF_TEXT(2, trace, "badirq");
590 return; 590 return;
@@ -598,7 +598,7 @@ claw_irq_handler(struct ccw_device *cdev,
598 p_ch = &privptr->channel[WRITE]; 598 p_ch = &privptr->channel[WRITE];
599 else { 599 else {
600 printk(KERN_WARNING "claw: Can't determine channel for " 600 printk(KERN_WARNING "claw: Can't determine channel for "
601 "interrupt, device %s\n", cdev->dev.bus_id); 601 "interrupt, device %s\n", dev_name(&cdev->dev));
602 CLAW_DBF_TEXT(2, trace, "badchan"); 602 CLAW_DBF_TEXT(2, trace, "badchan");
603 return; 603 return;
604 } 604 }
@@ -662,7 +662,7 @@ claw_irq_handler(struct ccw_device *cdev,
662 printk(KERN_WARNING "claw: unsolicited " 662 printk(KERN_WARNING "claw: unsolicited "
663 "interrupt for device:" 663 "interrupt for device:"
664 "%s received c-%02x d-%02x\n", 664 "%s received c-%02x d-%02x\n",
665 cdev->dev.bus_id, 665 dev_name(&cdev->dev),
666 irb->scsw.cmd.cstat, 666 irb->scsw.cmd.cstat,
667 irb->scsw.cmd.dstat); 667 irb->scsw.cmd.dstat);
668 return; 668 return;
@@ -1136,19 +1136,20 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1136 break; 1136 break;
1137 case -ENODEV: 1137 case -ENODEV:
1138 printk(KERN_EMERG "%s: Missing device called " 1138 printk(KERN_EMERG "%s: Missing device called "
1139 "for IO ENODEV\n", cdev->dev.bus_id); 1139 "for IO ENODEV\n", dev_name(&cdev->dev));
1140 break; 1140 break;
1141 case -EIO: 1141 case -EIO:
1142 printk(KERN_EMERG "%s: Status pending... EIO \n", 1142 printk(KERN_EMERG "%s: Status pending... EIO \n",
1143 cdev->dev.bus_id); 1143 dev_name(&cdev->dev));
1144 break; 1144 break;
1145 case -EINVAL: 1145 case -EINVAL:
1146 printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n", 1146 printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n",
1147 cdev->dev.bus_id); 1147 dev_name(&cdev->dev));
1148 break; 1148 break;
1149 default: 1149 default:
1150 printk(KERN_EMERG "%s: Unknown error in " 1150 printk(KERN_EMERG "%s: Unknown error in "
1151 "Do_IO %d\n",cdev->dev.bus_id, return_code); 1151 "Do_IO %d\n", dev_name(&cdev->dev),
1152 return_code);
1152 } 1153 }
1153 } 1154 }
1154 CLAW_DBF_TEXT(4, trace, "ccwret"); 1155 CLAW_DBF_TEXT(4, trace, "ccwret");
@@ -2848,11 +2849,11 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2848 struct chbk *p_ch; 2849 struct chbk *p_ch;
2849 struct ccw_dev_id dev_id; 2850 struct ccw_dev_id dev_id;
2850 2851
2851 CLAW_DBF_TEXT_(2, setup, "%s", cdev->dev.bus_id); 2852 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2852 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */ 2853 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2853 p_ch = &privptr->channel[i]; 2854 p_ch = &privptr->channel[i];
2854 p_ch->cdev = cdev; 2855 p_ch->cdev = cdev;
2855 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id); 2856 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2856 ccw_device_get_id(cdev, &dev_id); 2857 ccw_device_get_id(cdev, &dev_id);
2857 p_ch->devno = dev_id.devno; 2858 p_ch->devno = dev_id.devno;
2858 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { 2859 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
@@ -2879,7 +2880,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
2879 int ret; 2880 int ret;
2880 struct ccw_dev_id dev_id; 2881 struct ccw_dev_id dev_id;
2881 2882
2882 printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id); 2883 printk(KERN_INFO "claw: add for %s\n",
2884 dev_name(&cgdev->cdev[READ]->dev));
2883 CLAW_DBF_TEXT(2, setup, "new_dev"); 2885 CLAW_DBF_TEXT(2, setup, "new_dev");
2884 privptr = cgdev->dev.driver_data; 2886 privptr = cgdev->dev.driver_data;
2885 cgdev->cdev[READ]->dev.driver_data = privptr; 2887 cgdev->cdev[READ]->dev.driver_data = privptr;
@@ -2903,14 +2905,16 @@ claw_new_device(struct ccwgroup_device *cgdev)
2903 if (ret != 0) { 2905 if (ret != 0) {
2904 printk(KERN_WARNING 2906 printk(KERN_WARNING
2905 "claw: ccw_device_set_online %s READ failed " 2907 "claw: ccw_device_set_online %s READ failed "
2906 "with ret = %d\n",cgdev->cdev[READ]->dev.bus_id,ret); 2908 "with ret = %d\n", dev_name(&cgdev->cdev[READ]->dev),
2909 ret);
2907 goto out; 2910 goto out;
2908 } 2911 }
2909 ret = ccw_device_set_online(cgdev->cdev[WRITE]); 2912 ret = ccw_device_set_online(cgdev->cdev[WRITE]);
2910 if (ret != 0) { 2913 if (ret != 0) {
2911 printk(KERN_WARNING 2914 printk(KERN_WARNING
2912 "claw: ccw_device_set_online %s WRITE failed " 2915 "claw: ccw_device_set_online %s WRITE failed "
2913 "with ret = %d\n",cgdev->cdev[WRITE]->dev.bus_id, ret); 2916 "with ret = %d\n", dev_name(&cgdev->cdev[WRITE]->dev),
2917 ret);
2914 goto out; 2918 goto out;
2915 } 2919 }
2916 dev = alloc_netdev(0,"claw%d",claw_init_netdevice); 2920 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
@@ -2986,7 +2990,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
2986 struct net_device *ndev; 2990 struct net_device *ndev;
2987 int ret; 2991 int ret;
2988 2992
2989 CLAW_DBF_TEXT_(2, setup, "%s", cgdev->dev.bus_id); 2993 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
2990 priv = cgdev->dev.driver_data; 2994 priv = cgdev->dev.driver_data;
2991 if (!priv) 2995 if (!priv)
2992 return -ENODEV; 2996 return -ENODEV;
@@ -3016,11 +3020,11 @@ claw_remove_device(struct ccwgroup_device *cgdev)
3016 struct claw_privbk *priv; 3020 struct claw_privbk *priv;
3017 3021
3018 BUG_ON(!cgdev); 3022 BUG_ON(!cgdev);
3019 CLAW_DBF_TEXT_(2, setup, "%s", cgdev->dev.bus_id); 3023 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3020 priv = cgdev->dev.driver_data; 3024 priv = cgdev->dev.driver_data;
3021 BUG_ON(!priv); 3025 BUG_ON(!priv);
3022 printk(KERN_INFO "claw: %s() called %s will be removed.\n", 3026 printk(KERN_INFO "claw: %s() called %s will be removed.\n",
3023 __func__,cgdev->cdev[0]->dev.bus_id); 3027 __func__, dev_name(&cgdev->cdev[0]->dev));
3024 if (cgdev->state == CCWGROUP_ONLINE) 3028 if (cgdev->state == CCWGROUP_ONLINE)
3025 claw_shutdown_device(cgdev); 3029 claw_shutdown_device(cgdev);
3026 claw_remove_files(&cgdev->dev); 3030 claw_remove_files(&cgdev->dev);
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 1a89d989f348..005072c420d3 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -85,7 +85,7 @@
85#define CLAW_MAX_DEV 256 /* max claw devices */ 85#define CLAW_MAX_DEV 256 /* max claw devices */
86#define MAX_NAME_LEN 8 /* host name, adapter name length */ 86#define MAX_NAME_LEN 8 /* host name, adapter name length */
87#define CLAW_FRAME_SIZE 4096 87#define CLAW_FRAME_SIZE 4096
88#define CLAW_ID_SIZE BUS_ID_SIZE+3 88#define CLAW_ID_SIZE 20+3
89 89
90/* state machine codes used in claw_irq_handler */ 90/* state machine codes used in claw_irq_handler */
91 91
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index b11fec24c7d2..a4e29836a2aa 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -277,18 +277,18 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
277 277
278 CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN, 278 CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
279 "irb error %ld on device %s\n", 279 "irb error %ld on device %s\n",
280 PTR_ERR(irb), cdev->dev.bus_id); 280 PTR_ERR(irb), dev_name(&cdev->dev));
281 281
282 switch (PTR_ERR(irb)) { 282 switch (PTR_ERR(irb)) {
283 case -EIO: 283 case -EIO:
284 ctcm_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id); 284 ctcm_pr_warn("i/o-error on device %s\n", dev_name(&cdev->dev));
285 break; 285 break;
286 case -ETIMEDOUT: 286 case -ETIMEDOUT:
287 ctcm_pr_warn("timeout on device %s\n", cdev->dev.bus_id); 287 ctcm_pr_warn("timeout on device %s\n", dev_name(&cdev->dev));
288 break; 288 break;
289 default: 289 default:
290 ctcm_pr_warn("unknown error %ld on device %s\n", 290 ctcm_pr_warn("unknown error %ld on device %s\n",
291 PTR_ERR(irb), cdev->dev.bus_id); 291 PTR_ERR(irb), dev_name(&cdev->dev));
292 } 292 }
293 return PTR_ERR(irb); 293 return PTR_ERR(irb);
294} 294}
@@ -1182,7 +1182,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1182 int dstat; 1182 int dstat;
1183 1183
1184 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, 1184 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1185 "Enter %s(%s)", CTCM_FUNTAIL, &cdev->dev.bus_id); 1185 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
1186 1186
1187 if (ctcm_check_irb_error(cdev, irb)) 1187 if (ctcm_check_irb_error(cdev, irb))
1188 return; 1188 return;
@@ -1208,14 +1208,14 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1208 ch = priv->channel[WRITE]; 1208 ch = priv->channel[WRITE];
1209 else { 1209 else {
1210 ctcm_pr_err("ctcm: Can't determine channel for interrupt, " 1210 ctcm_pr_err("ctcm: Can't determine channel for interrupt, "
1211 "device %s\n", cdev->dev.bus_id); 1211 "device %s\n", dev_name(&cdev->dev));
1212 return; 1212 return;
1213 } 1213 }
1214 1214
1215 dev = ch->netdev; 1215 dev = ch->netdev;
1216 if (dev == NULL) { 1216 if (dev == NULL) {
1217 ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n", 1217 ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n",
1218 __func__, cdev->dev.bus_id, ch); 1218 __func__, dev_name(&cdev->dev), ch);
1219 return; 1219 return;
1220 } 1220 }
1221 1221
@@ -1329,7 +1329,7 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type,
1329 1329
1330 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, 1330 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1331 "%s(%s), type %d, proto %d", 1331 "%s(%s), type %d, proto %d",
1332 __func__, cdev->dev.bus_id, type, priv->protocol); 1332 __func__, dev_name(&cdev->dev), type, priv->protocol);
1333 1333
1334 ch = kzalloc(sizeof(struct channel), GFP_KERNEL); 1334 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1335 if (ch == NULL) 1335 if (ch == NULL)
@@ -1358,7 +1358,7 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type,
1358 goto nomem_return; 1358 goto nomem_return;
1359 1359
1360 ch->cdev = cdev; 1360 ch->cdev = cdev;
1361 snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", cdev->dev.bus_id); 1361 snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
1362 ch->type = type; 1362 ch->type = type;
1363 1363
1364 /** 1364 /**
@@ -1518,8 +1518,8 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1518 1518
1519 type = get_channel_type(&cdev0->id); 1519 type = get_channel_type(&cdev0->id);
1520 1520
1521 snprintf(read_id, CTCM_ID_SIZE, "ch-%s", cdev0->dev.bus_id); 1521 snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev));
1522 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", cdev1->dev.bus_id); 1522 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
1523 1523
1524 ret = add_channel(cdev0, type, priv); 1524 ret = add_channel(cdev0, type, priv);
1525 if (ret) 1525 if (ret)
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index 8e10ee86a5ee..d77cce3fe4d4 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -104,7 +104,7 @@
104#define READ 0 104#define READ 0
105#define WRITE 1 105#define WRITE 1
106 106
107#define CTCM_ID_SIZE BUS_ID_SIZE+3 107#define CTCM_ID_SIZE 20+3
108 108
109struct ctcm_profile { 109struct ctcm_profile {
110 unsigned long maxmulti; 110 unsigned long maxmulti;
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index cbe470493bf0..19f5d5ed85e0 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1673,7 +1673,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
1673 1673
1674done: 1674done:
1675 if (rc) { 1675 if (rc) {
1676 ctcm_pr_info("ctcmpc : %s() failed\n", __FUNCTION__); 1676 ctcm_pr_info("ctcmpc : %s() failed\n", __func__);
1677 priv->xid->xid2_flag2 = 0x40; 1677 priv->xid->xid2_flag2 = 0x40;
1678 grp->saved_xid2->xid2_flag2 = 0x40; 1678 grp->saved_xid2->xid2_flag2 = 0x40;
1679 } 1679 }
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 9bcfa04d863b..0825be87e5a0 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -492,7 +492,7 @@ lcs_start_channel(struct lcs_channel *channel)
492 unsigned long flags; 492 unsigned long flags;
493 int rc; 493 int rc;
494 494
495 LCS_DBF_TEXT_(4,trace,"ssch%s", channel->ccwdev->dev.bus_id); 495 LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
496 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 496 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
497 rc = ccw_device_start(channel->ccwdev, 497 rc = ccw_device_start(channel->ccwdev,
498 channel->ccws + channel->io_idx, 0, 0, 498 channel->ccws + channel->io_idx, 0, 0,
@@ -501,7 +501,8 @@ lcs_start_channel(struct lcs_channel *channel)
501 channel->state = LCS_CH_STATE_RUNNING; 501 channel->state = LCS_CH_STATE_RUNNING;
502 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 502 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
503 if (rc) { 503 if (rc) {
504 LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id); 504 LCS_DBF_TEXT_(4,trace,"essh%s",
505 dev_name(&channel->ccwdev->dev));
505 PRINT_ERR("Error in starting channel, rc=%d!\n", rc); 506 PRINT_ERR("Error in starting channel, rc=%d!\n", rc);
506 } 507 }
507 return rc; 508 return rc;
@@ -514,12 +515,13 @@ lcs_clear_channel(struct lcs_channel *channel)
514 int rc; 515 int rc;
515 516
516 LCS_DBF_TEXT(4,trace,"clearch"); 517 LCS_DBF_TEXT(4,trace,"clearch");
517 LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id); 518 LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
518 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 519 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
519 rc = ccw_device_clear(channel->ccwdev, (addr_t) channel); 520 rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
520 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 521 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
521 if (rc) { 522 if (rc) {
522 LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id); 523 LCS_DBF_TEXT_(4, trace, "ecsc%s",
524 dev_name(&channel->ccwdev->dev));
523 return rc; 525 return rc;
524 } 526 }
525 wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED)); 527 wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
@@ -540,13 +542,14 @@ lcs_stop_channel(struct lcs_channel *channel)
540 if (channel->state == LCS_CH_STATE_STOPPED) 542 if (channel->state == LCS_CH_STATE_STOPPED)
541 return 0; 543 return 0;
542 LCS_DBF_TEXT(4,trace,"haltsch"); 544 LCS_DBF_TEXT(4,trace,"haltsch");
543 LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id); 545 LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
544 channel->state = LCS_CH_STATE_INIT; 546 channel->state = LCS_CH_STATE_INIT;
545 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 547 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
546 rc = ccw_device_halt(channel->ccwdev, (addr_t) channel); 548 rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
547 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 549 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
548 if (rc) { 550 if (rc) {
549 LCS_DBF_TEXT_(4,trace,"ehsc%s", channel->ccwdev->dev.bus_id); 551 LCS_DBF_TEXT_(4, trace, "ehsc%s",
552 dev_name(&channel->ccwdev->dev));
550 return rc; 553 return rc;
551 } 554 }
552 /* Asynchronous halt initialted. Wait for its completion. */ 555 /* Asynchronous halt initialted. Wait for its completion. */
@@ -632,10 +635,11 @@ __lcs_resume_channel(struct lcs_channel *channel)
632 return 0; 635 return 0;
633 if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND) 636 if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
634 return 0; 637 return 0;
635 LCS_DBF_TEXT_(5, trace, "rsch%s", channel->ccwdev->dev.bus_id); 638 LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
636 rc = ccw_device_resume(channel->ccwdev); 639 rc = ccw_device_resume(channel->ccwdev);
637 if (rc) { 640 if (rc) {
638 LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id); 641 LCS_DBF_TEXT_(4, trace, "ersc%s",
642 dev_name(&channel->ccwdev->dev));
639 PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc); 643 PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
640 } else 644 } else
641 channel->state = LCS_CH_STATE_RUNNING; 645 channel->state = LCS_CH_STATE_RUNNING;
@@ -1302,18 +1306,18 @@ lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1302 1306
1303 switch (PTR_ERR(irb)) { 1307 switch (PTR_ERR(irb)) {
1304 case -EIO: 1308 case -EIO:
1305 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id); 1309 PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev));
1306 LCS_DBF_TEXT(2, trace, "ckirberr"); 1310 LCS_DBF_TEXT(2, trace, "ckirberr");
1307 LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); 1311 LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
1308 break; 1312 break;
1309 case -ETIMEDOUT: 1313 case -ETIMEDOUT:
1310 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id); 1314 PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev));
1311 LCS_DBF_TEXT(2, trace, "ckirberr"); 1315 LCS_DBF_TEXT(2, trace, "ckirberr");
1312 LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); 1316 LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
1313 break; 1317 break;
1314 default: 1318 default:
1315 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), 1319 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
1316 cdev->dev.bus_id); 1320 dev_name(&cdev->dev));
1317 LCS_DBF_TEXT(2, trace, "ckirberr"); 1321 LCS_DBF_TEXT(2, trace, "ckirberr");
1318 LCS_DBF_TEXT(2, trace, " rc???"); 1322 LCS_DBF_TEXT(2, trace, " rc???");
1319 } 1323 }
@@ -1390,7 +1394,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1390 1394
1391 cstat = irb->scsw.cmd.cstat; 1395 cstat = irb->scsw.cmd.cstat;
1392 dstat = irb->scsw.cmd.dstat; 1396 dstat = irb->scsw.cmd.dstat;
1393 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); 1397 LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
1394 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, 1398 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
1395 irb->scsw.cmd.dstat); 1399 irb->scsw.cmd.dstat);
1396 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, 1400 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
@@ -1400,7 +1404,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1400 rc = lcs_get_problem(cdev, irb); 1404 rc = lcs_get_problem(cdev, irb);
1401 if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { 1405 if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
1402 PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n", 1406 PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n",
1403 cdev->dev.bus_id, dstat, cstat); 1407 dev_name(&cdev->dev), dstat, cstat);
1404 if (rc) { 1408 if (rc) {
1405 channel->state = LCS_CH_STATE_ERROR; 1409 channel->state = LCS_CH_STATE_ERROR;
1406 } 1410 }
@@ -1463,7 +1467,7 @@ lcs_tasklet(unsigned long data)
1463 int rc; 1467 int rc;
1464 1468
1465 channel = (struct lcs_channel *) data; 1469 channel = (struct lcs_channel *) data;
1466 LCS_DBF_TEXT_(5, trace, "tlet%s",channel->ccwdev->dev.bus_id); 1470 LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
1467 1471
1468 /* Check for processed buffers. */ 1472 /* Check for processed buffers. */
1469 iob = channel->iob; 1473 iob = channel->iob;
@@ -2244,7 +2248,7 @@ lcs_recovery(void *ptr)
2244 return 0; 2248 return 0;
2245 LCS_DBF_TEXT(4, trace, "recover2"); 2249 LCS_DBF_TEXT(4, trace, "recover2");
2246 gdev = card->gdev; 2250 gdev = card->gdev;
2247 PRINT_WARN("Recovery of device %s started...\n", gdev->dev.bus_id); 2251 PRINT_WARN("Recovery of device %s started...\n", dev_name(&gdev->dev));
2248 rc = __lcs_shutdown_device(gdev, 1); 2252 rc = __lcs_shutdown_device(gdev, 1);
2249 rc = lcs_new_device(gdev); 2253 rc = lcs_new_device(gdev);
2250 if (!rc) 2254 if (!rc)
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 9242b5acc66b..0fea51e34b57 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1724,7 +1724,7 @@ static int netiucv_register_device(struct net_device *ndev)
1724 IUCV_DBF_TEXT(trace, 3, __func__); 1724 IUCV_DBF_TEXT(trace, 3, __func__);
1725 1725
1726 if (dev) { 1726 if (dev) {
1727 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name); 1727 dev_set_name(dev, "net%s", ndev->name);
1728 dev->bus = &iucv_bus; 1728 dev->bus = &iucv_bus;
1729 dev->parent = iucv_root; 1729 dev->parent = iucv_root;
1730 /* 1730 /*
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index bf8a75c92f28..af6d60458513 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -90,11 +90,11 @@ struct qeth_dbf_info {
90#define CARD_RDEV(card) card->read.ccwdev 90#define CARD_RDEV(card) card->read.ccwdev
91#define CARD_WDEV(card) card->write.ccwdev 91#define CARD_WDEV(card) card->write.ccwdev
92#define CARD_DDEV(card) card->data.ccwdev 92#define CARD_DDEV(card) card->data.ccwdev
93#define CARD_BUS_ID(card) card->gdev->dev.bus_id 93#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
94#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id 94#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
95#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id 95#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
96#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id 96#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
97#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id 97#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
98 98
99/** 99/**
100 * card stuff 100 * card stuff
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c7ab1b864516..7de410d5be4a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -745,7 +745,7 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
745 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { 745 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
746 QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); 746 QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
747 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", 747 PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
748 cdev->dev.bus_id, dstat, cstat); 748 dev_name(&cdev->dev), dstat, cstat);
749 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 749 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
750 16, 1, irb, 64, 1); 750 16, 1, irb, 64, 1);
751 return 1; 751 return 1;
@@ -760,7 +760,7 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
760 if (sense[SENSE_COMMAND_REJECT_BYTE] & 760 if (sense[SENSE_COMMAND_REJECT_BYTE] &
761 SENSE_COMMAND_REJECT_FLAG) { 761 SENSE_COMMAND_REJECT_FLAG) {
762 QETH_DBF_TEXT(TRACE, 2, "CMDREJi"); 762 QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
763 return 0; 763 return 1;
764 } 764 }
765 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { 765 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
766 QETH_DBF_TEXT(TRACE, 2, "AFFE"); 766 QETH_DBF_TEXT(TRACE, 2, "AFFE");
@@ -784,12 +784,12 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
784 784
785 switch (PTR_ERR(irb)) { 785 switch (PTR_ERR(irb)) {
786 case -EIO: 786 case -EIO:
787 PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id); 787 PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev));
788 QETH_DBF_TEXT(TRACE, 2, "ckirberr"); 788 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
789 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); 789 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
790 break; 790 break;
791 case -ETIMEDOUT: 791 case -ETIMEDOUT:
792 PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id); 792 PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev));
793 QETH_DBF_TEXT(TRACE, 2, "ckirberr"); 793 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
794 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); 794 QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
795 if (intparm == QETH_RCD_PARM) { 795 if (intparm == QETH_RCD_PARM) {
@@ -803,7 +803,7 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
803 break; 803 break;
804 default: 804 default:
805 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), 805 PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
806 cdev->dev.bus_id); 806 dev_name(&cdev->dev));
807 QETH_DBF_TEXT(TRACE, 2, "ckirberr"); 807 QETH_DBF_TEXT(TRACE, 2, "ckirberr");
808 QETH_DBF_TEXT(TRACE, 2, " rc???"); 808 QETH_DBF_TEXT(TRACE, 2, " rc???");
809 } 809 }
@@ -884,6 +884,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
884 } 884 }
885 rc = qeth_get_problem(cdev, irb); 885 rc = qeth_get_problem(cdev, irb);
886 if (rc) { 886 if (rc) {
887 qeth_clear_ipacmd_list(card);
887 qeth_schedule_recovery(card); 888 qeth_schedule_recovery(card);
888 goto out; 889 goto out;
889 } 890 }
@@ -4081,7 +4082,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4081 if (!get_device(dev)) 4082 if (!get_device(dev))
4082 return -ENODEV; 4083 return -ENODEV;
4083 4084
4084 QETH_DBF_TEXT_(SETUP, 2, "%s", gdev->dev.bus_id); 4085 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
4085 4086
4086 card = qeth_alloc_card(); 4087 card = qeth_alloc_card();
4087 if (!card) { 4088 if (!card) {
@@ -4147,6 +4148,7 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
4147 unsigned long flags; 4148 unsigned long flags;
4148 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 4149 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4149 4150
4151 QETH_DBF_TEXT(SETUP, 2, "removedv");
4150 if (card->discipline.ccwgdriver) { 4152 if (card->discipline.ccwgdriver) {
4151 card->discipline.ccwgdriver->remove(gdev); 4153 card->discipline.ccwgdriver->remove(gdev);
4152 qeth_core_free_discipline(card); 4154 qeth_core_free_discipline(card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 3ac3cc1e03cc..955ba7a31b90 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -395,7 +395,8 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
395 } 395 }
396 if (card->state == CARD_STATE_SOFTSETUP) { 396 if (card->state == CARD_STATE_SOFTSETUP) {
397 qeth_l2_process_vlans(card, 1); 397 qeth_l2_process_vlans(card, 1);
398 qeth_l2_del_all_mc(card); 398 if (!card->use_hard_stop)
399 qeth_l2_del_all_mc(card);
399 qeth_clear_ipacmd_list(card); 400 qeth_clear_ipacmd_list(card);
400 card->state = CARD_STATE_HARDSETUP; 401 card->state = CARD_STATE_HARDSETUP;
401 } 402 }
@@ -559,7 +560,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
559 "device %s: x%x\n", CARD_BUS_ID(card), rc); 560 "device %s: x%x\n", CARD_BUS_ID(card), rc);
560 } 561 }
561 562
562 if (card->info.guestlan) { 563 if ((card->info.type == QETH_CARD_TYPE_IQD) ||
564 (card->info.guestlan)) {
563 rc = qeth_setadpparms_change_macaddr(card); 565 rc = qeth_setadpparms_change_macaddr(card);
564 if (rc) { 566 if (rc) {
565 QETH_DBF_MESSAGE(2, "couldn't get MAC address on " 567 QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
@@ -825,7 +827,6 @@ static int qeth_l2_open(struct net_device *dev)
825 } 827 }
826 card->data.state = CH_STATE_UP; 828 card->data.state = CH_STATE_UP;
827 card->state = CARD_STATE_UP; 829 card->state = CARD_STATE_UP;
828 card->dev->flags |= IFF_UP;
829 netif_start_queue(dev); 830 netif_start_queue(dev);
830 831
831 if (!card->lan_online && netif_carrier_ok(dev)) 832 if (!card->lan_online && netif_carrier_ok(dev))
@@ -840,7 +841,6 @@ static int qeth_l2_stop(struct net_device *dev)
840 841
841 QETH_DBF_TEXT(TRACE, 4, "qethstop"); 842 QETH_DBF_TEXT(TRACE, 4, "qethstop");
842 netif_tx_disable(dev); 843 netif_tx_disable(dev);
843 card->dev->flags &= ~IFF_UP;
844 if (card->state == CARD_STATE_UP) 844 if (card->state == CARD_STATE_UP)
845 card->state = CARD_STATE_SOFTSETUP; 845 card->state = CARD_STATE_SOFTSETUP;
846 return 0; 846 return 0;
@@ -1137,9 +1137,13 @@ static int qeth_l2_recover(void *ptr)
1137 if (!rc) 1137 if (!rc)
1138 PRINT_INFO("Device %s successfully recovered!\n", 1138 PRINT_INFO("Device %s successfully recovered!\n",
1139 CARD_BUS_ID(card)); 1139 CARD_BUS_ID(card));
1140 else 1140 else {
1141 rtnl_lock();
1142 dev_close(card->dev);
1143 rtnl_unlock();
1141 PRINT_INFO("Device %s could not be recovered!\n", 1144 PRINT_INFO("Device %s could not be recovered!\n",
1142 CARD_BUS_ID(card)); 1145 CARD_BUS_ID(card));
1146 }
1143 return 0; 1147 return 0;
1144} 1148}
1145 1149
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index dd72c3c20165..99547dea44de 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2795,7 +2795,6 @@ static int qeth_l3_open(struct net_device *dev)
2795 return -ENODEV; 2795 return -ENODEV;
2796 card->data.state = CH_STATE_UP; 2796 card->data.state = CH_STATE_UP;
2797 card->state = CARD_STATE_UP; 2797 card->state = CARD_STATE_UP;
2798 card->dev->flags |= IFF_UP;
2799 netif_start_queue(dev); 2798 netif_start_queue(dev);
2800 2799
2801 if (!card->lan_online && netif_carrier_ok(dev)) 2800 if (!card->lan_online && netif_carrier_ok(dev))
@@ -2809,7 +2808,6 @@ static int qeth_l3_stop(struct net_device *dev)
2809 2808
2810 QETH_DBF_TEXT(TRACE, 4, "qethstop"); 2809 QETH_DBF_TEXT(TRACE, 4, "qethstop");
2811 netif_tx_disable(dev); 2810 netif_tx_disable(dev);
2812 card->dev->flags &= ~IFF_UP;
2813 if (card->state == CARD_STATE_UP) 2811 if (card->state == CARD_STATE_UP)
2814 card->state = CARD_STATE_SOFTSETUP; 2812 card->state = CARD_STATE_SOFTSETUP;
2815 return 0; 2813 return 0;
@@ -3218,9 +3216,13 @@ static int qeth_l3_recover(void *ptr)
3218 if (!rc) 3216 if (!rc)
3219 PRINT_INFO("Device %s successfully recovered!\n", 3217 PRINT_INFO("Device %s successfully recovered!\n",
3220 CARD_BUS_ID(card)); 3218 CARD_BUS_ID(card));
3221 else 3219 else {
3220 rtnl_lock();
3221 dev_close(card->dev);
3222 rtnl_unlock();
3222 PRINT_INFO("Device %s could not be recovered!\n", 3223 PRINT_INFO("Device %s could not be recovered!\n",
3223 CARD_BUS_ID(card)); 3224 CARD_BUS_ID(card));
3225 }
3224 return 0; 3226 return 0;
3225} 3227}
3226 3228
diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c
index 3c7145d9f9a1..64371c05a3b3 100644
--- a/drivers/s390/s390_rdev.c
+++ b/drivers/s390/s390_rdev.c
@@ -30,7 +30,7 @@ s390_root_dev_register(const char *name)
30 dev = kzalloc(sizeof(struct device), GFP_KERNEL); 30 dev = kzalloc(sizeof(struct device), GFP_KERNEL);
31 if (!dev) 31 if (!dev)
32 return ERR_PTR(-ENOMEM); 32 return ERR_PTR(-ENOMEM);
33 strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); 33 dev_set_name(dev, name);
34 dev->release = s390_root_dev_release; 34 dev->release = s390_root_dev_release;
35 ret = device_register(dev); 35 ret = device_register(dev);
36 if (ret) { 36 if (ret) {
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 90abfd06ed55..3b56220fb900 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -88,11 +88,13 @@ static int __init zfcp_device_setup(char *devstr)
88 strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE); 88 strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
89 89
90 token = strsep(&str, ","); 90 token = strsep(&str, ",");
91 if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn)) 91 if (!token || strict_strtoull(token, 0,
92 (unsigned long long *) &zfcp_data.init_wwpn))
92 goto err_out; 93 goto err_out;
93 94
94 token = strsep(&str, ","); 95 token = strsep(&str, ",");
95 if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun)) 96 if (!token || strict_strtoull(token, 0,
97 (unsigned long long *) &zfcp_data.init_fcp_lun))
96 goto err_out; 98 goto err_out;
97 99
98 kfree(str); 100 kfree(str);
@@ -100,24 +102,10 @@ static int __init zfcp_device_setup(char *devstr)
100 102
101 err_out: 103 err_out:
102 kfree(str); 104 kfree(str);
103 pr_err("zfcp: Parse error for device parameter string %s, " 105 pr_err("zfcp: %s is not a valid SCSI device\n", devstr);
104 "device not attached.\n", devstr);
105 return 0; 106 return 0;
106} 107}
107 108
108static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id)
109{
110 struct zfcp_adapter *adapter;
111
112 list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list)
113 if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id,
114 BUS_ID_SIZE) == 0) &&
115 !(atomic_read(&adapter->status) &
116 ZFCP_STATUS_COMMON_REMOVE))
117 return adapter;
118 return NULL;
119}
120
121static void __init zfcp_init_device_configure(void) 109static void __init zfcp_init_device_configure(void)
122{ 110{
123 struct zfcp_adapter *adapter; 111 struct zfcp_adapter *adapter;
@@ -141,7 +129,12 @@ static void __init zfcp_init_device_configure(void)
141 goto out_unit; 129 goto out_unit;
142 up(&zfcp_data.config_sema); 130 up(&zfcp_data.config_sema);
143 ccw_device_set_online(adapter->ccw_device); 131 ccw_device_set_online(adapter->ccw_device);
132
144 zfcp_erp_wait(adapter); 133 zfcp_erp_wait(adapter);
134 wait_event(adapter->erp_done_wqh,
135 !(atomic_read(&unit->status) &
136 ZFCP_STATUS_UNIT_SCSI_WORK_PENDING));
137
145 down(&zfcp_data.config_sema); 138 down(&zfcp_data.config_sema);
146 zfcp_unit_put(unit); 139 zfcp_unit_put(unit);
147out_unit: 140out_unit:
@@ -180,9 +173,9 @@ static int __init zfcp_module_init(void)
180 if (!zfcp_data.gid_pn_cache) 173 if (!zfcp_data.gid_pn_cache)
181 goto out_gid_cache; 174 goto out_gid_cache;
182 175
183 INIT_LIST_HEAD(&zfcp_data.adapter_list_head); 176 zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
184 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
185 177
178 INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
186 sema_init(&zfcp_data.config_sema, 1); 179 sema_init(&zfcp_data.config_sema, 1);
187 rwlock_init(&zfcp_data.config_lock); 180 rwlock_init(&zfcp_data.config_lock);
188 181
@@ -193,13 +186,14 @@ static int __init zfcp_module_init(void)
193 186
194 retval = misc_register(&zfcp_cfdc_misc); 187 retval = misc_register(&zfcp_cfdc_misc);
195 if (retval) { 188 if (retval) {
196 pr_err("zfcp: registration of misc device zfcp_cfdc failed\n"); 189 pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n");
197 goto out_misc; 190 goto out_misc;
198 } 191 }
199 192
200 retval = zfcp_ccw_register(); 193 retval = zfcp_ccw_register();
201 if (retval) { 194 if (retval) {
202 pr_err("zfcp: Registration with common I/O layer failed.\n"); 195 pr_err("zfcp: The zfcp device driver could not register with "
196 "the common I/O layer\n");
203 goto out_ccw_register; 197 goto out_ccw_register;
204 } 198 }
205 199
@@ -231,8 +225,7 @@ module_init(zfcp_module_init);
231 * 225 *
232 * Returns: pointer to zfcp_unit or NULL 226 * Returns: pointer to zfcp_unit or NULL
233 */ 227 */
234struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, 228struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
235 fcp_lun_t fcp_lun)
236{ 229{
237 struct zfcp_unit *unit; 230 struct zfcp_unit *unit;
238 231
@@ -251,7 +244,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
251 * Returns: pointer to zfcp_port or NULL 244 * Returns: pointer to zfcp_port or NULL
252 */ 245 */
253struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, 246struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
254 wwn_t wwpn) 247 u64 wwpn)
255{ 248{
256 struct zfcp_port *port; 249 struct zfcp_port *port;
257 250
@@ -276,7 +269,7 @@ static void zfcp_sysfs_unit_release(struct device *dev)
276 * 269 *
277 * Sets up some unit internal structures and creates sysfs entry. 270 * Sets up some unit internal structures and creates sysfs entry.
278 */ 271 */
279struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) 272struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
280{ 273{
281 struct zfcp_unit *unit; 274 struct zfcp_unit *unit;
282 275
@@ -290,7 +283,8 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
290 unit->port = port; 283 unit->port = port;
291 unit->fcp_lun = fcp_lun; 284 unit->fcp_lun = fcp_lun;
292 285
293 snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun); 286 dev_set_name(&unit->sysfs_device, "0x%016llx",
287 (unsigned long long) fcp_lun);
294 unit->sysfs_device.parent = &port->sysfs_device; 288 unit->sysfs_device.parent = &port->sysfs_device;
295 unit->sysfs_device.release = zfcp_sysfs_unit_release; 289 unit->sysfs_device.release = zfcp_sysfs_unit_release;
296 dev_set_drvdata(&unit->sysfs_device, unit); 290 dev_set_drvdata(&unit->sysfs_device, unit);
@@ -323,7 +317,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
323 } 317 }
324 318
325 zfcp_unit_get(unit); 319 zfcp_unit_get(unit);
326 unit->scsi_lun = scsilun_to_int((struct scsi_lun *)&unit->fcp_lun);
327 320
328 write_lock_irq(&zfcp_data.config_lock); 321 write_lock_irq(&zfcp_data.config_lock);
329 list_add_tail(&unit->list, &port->unit_list_head); 322 list_add_tail(&unit->list, &port->unit_list_head);
@@ -332,7 +325,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
332 325
333 write_unlock_irq(&zfcp_data.config_lock); 326 write_unlock_irq(&zfcp_data.config_lock);
334 327
335 port->units++;
336 zfcp_port_get(port); 328 zfcp_port_get(port);
337 329
338 return unit; 330 return unit;
@@ -351,11 +343,10 @@ err_out_free:
351 */ 343 */
352void zfcp_unit_dequeue(struct zfcp_unit *unit) 344void zfcp_unit_dequeue(struct zfcp_unit *unit)
353{ 345{
354 zfcp_unit_wait(unit); 346 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
355 write_lock_irq(&zfcp_data.config_lock); 347 write_lock_irq(&zfcp_data.config_lock);
356 list_del(&unit->list); 348 list_del(&unit->list);
357 write_unlock_irq(&zfcp_data.config_lock); 349 write_unlock_irq(&zfcp_data.config_lock);
358 unit->port->units--;
359 zfcp_port_put(unit->port); 350 zfcp_port_put(unit->port);
360 sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs); 351 sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
361 device_unregister(&unit->sysfs_device); 352 device_unregister(&unit->sysfs_device);
@@ -416,11 +407,6 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
416 mempool_destroy(adapter->pool.data_gid_pn); 407 mempool_destroy(adapter->pool.data_gid_pn);
417} 408}
418 409
419static void zfcp_dummy_release(struct device *dev)
420{
421 return;
422}
423
424/** 410/**
425 * zfcp_status_read_refill - refill the long running status_read_requests 411 * zfcp_status_read_refill - refill the long running status_read_requests
426 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled 412 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
@@ -450,19 +436,6 @@ static void _zfcp_status_read_scheduler(struct work_struct *work)
450 stat_work)); 436 stat_work));
451} 437}
452 438
453static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
454{
455 struct zfcp_port *port;
456
457 port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
458 ZFCP_DID_DIRECTORY_SERVICE);
459 if (IS_ERR(port))
460 return PTR_ERR(port);
461 zfcp_port_put(port);
462
463 return 0;
464}
465
466/** 439/**
467 * zfcp_adapter_enqueue - enqueue a new adapter to the list 440 * zfcp_adapter_enqueue - enqueue a new adapter to the list
468 * @ccw_device: pointer to the struct cc_device 441 * @ccw_device: pointer to the struct cc_device
@@ -508,7 +481,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
508 init_waitqueue_head(&adapter->erp_done_wqh); 481 init_waitqueue_head(&adapter->erp_done_wqh);
509 482
510 INIT_LIST_HEAD(&adapter->port_list_head); 483 INIT_LIST_HEAD(&adapter->port_list_head);
511 INIT_LIST_HEAD(&adapter->port_remove_lh);
512 INIT_LIST_HEAD(&adapter->erp_ready_head); 484 INIT_LIST_HEAD(&adapter->erp_ready_head);
513 INIT_LIST_HEAD(&adapter->erp_running_head); 485 INIT_LIST_HEAD(&adapter->erp_running_head);
514 486
@@ -518,7 +490,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
518 spin_lock_init(&adapter->san_dbf_lock); 490 spin_lock_init(&adapter->san_dbf_lock);
519 spin_lock_init(&adapter->scsi_dbf_lock); 491 spin_lock_init(&adapter->scsi_dbf_lock);
520 spin_lock_init(&adapter->rec_dbf_lock); 492 spin_lock_init(&adapter->rec_dbf_lock);
521 spin_lock_init(&adapter->req_q.lock); 493 spin_lock_init(&adapter->req_q_lock);
522 494
523 rwlock_init(&adapter->erp_lock); 495 rwlock_init(&adapter->erp_lock);
524 rwlock_init(&adapter->abort_lock); 496 rwlock_init(&adapter->abort_lock);
@@ -537,28 +509,15 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
537 &zfcp_sysfs_adapter_attrs)) 509 &zfcp_sysfs_adapter_attrs))
538 goto sysfs_failed; 510 goto sysfs_failed;
539 511
540 adapter->generic_services.parent = &adapter->ccw_device->dev;
541 adapter->generic_services.release = zfcp_dummy_release;
542 snprintf(adapter->generic_services.bus_id, BUS_ID_SIZE,
543 "generic_services");
544
545 if (device_register(&adapter->generic_services))
546 goto generic_services_failed;
547
548 write_lock_irq(&zfcp_data.config_lock); 512 write_lock_irq(&zfcp_data.config_lock);
549 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 513 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
550 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head); 514 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
551 write_unlock_irq(&zfcp_data.config_lock); 515 write_unlock_irq(&zfcp_data.config_lock);
552 516
553 zfcp_data.adapters++; 517 zfcp_fc_nameserver_init(adapter);
554
555 zfcp_nameserver_enqueue(adapter);
556 518
557 return 0; 519 return 0;
558 520
559generic_services_failed:
560 sysfs_remove_group(&ccw_device->dev.kobj,
561 &zfcp_sysfs_adapter_attrs);
562sysfs_failed: 521sysfs_failed:
563 zfcp_adapter_debug_unregister(adapter); 522 zfcp_adapter_debug_unregister(adapter);
564debug_register_failed: 523debug_register_failed:
@@ -585,7 +544,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
585 cancel_work_sync(&adapter->scan_work); 544 cancel_work_sync(&adapter->scan_work);
586 cancel_work_sync(&adapter->stat_work); 545 cancel_work_sync(&adapter->stat_work);
587 zfcp_adapter_scsi_unregister(adapter); 546 zfcp_adapter_scsi_unregister(adapter);
588 device_unregister(&adapter->generic_services);
589 sysfs_remove_group(&adapter->ccw_device->dev.kobj, 547 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
590 &zfcp_sysfs_adapter_attrs); 548 &zfcp_sysfs_adapter_attrs);
591 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 549 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
@@ -603,9 +561,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
603 list_del(&adapter->list); 561 list_del(&adapter->list);
604 write_unlock_irq(&zfcp_data.config_lock); 562 write_unlock_irq(&zfcp_data.config_lock);
605 563
606 /* decrease number of adapters in list */
607 zfcp_data.adapters--;
608
609 zfcp_qdio_free(adapter); 564 zfcp_qdio_free(adapter);
610 565
611 zfcp_free_low_mem_buffers(adapter); 566 zfcp_free_low_mem_buffers(adapter);
@@ -633,21 +588,19 @@ static void zfcp_sysfs_port_release(struct device *dev)
633 * d_id is used to enqueue ports with a well known address like the Directory 588 * d_id is used to enqueue ports with a well known address like the Directory
634 * Service for nameserver lookup. 589 * Service for nameserver lookup.
635 */ 590 */
636struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, 591struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
637 u32 status, u32 d_id) 592 u32 status, u32 d_id)
638{ 593{
639 struct zfcp_port *port; 594 struct zfcp_port *port;
640 int retval; 595 int retval;
641 char *bus_id;
642 596
643 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); 597 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
644 if (!port) 598 if (!port)
645 return ERR_PTR(-ENOMEM); 599 return ERR_PTR(-ENOMEM);
646 600
647 init_waitqueue_head(&port->remove_wq); 601 init_waitqueue_head(&port->remove_wq);
648
649 INIT_LIST_HEAD(&port->unit_list_head); 602 INIT_LIST_HEAD(&port->unit_list_head);
650 INIT_LIST_HEAD(&port->unit_remove_lh); 603 INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
651 604
652 port->adapter = adapter; 605 port->adapter = adapter;
653 port->d_id = d_id; 606 port->d_id = d_id;
@@ -657,34 +610,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
657 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); 610 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
658 atomic_set(&port->refcount, 0); 611 atomic_set(&port->refcount, 0);
659 612
660 if (status & ZFCP_STATUS_PORT_WKA) { 613 dev_set_name(&port->sysfs_device, "0x%016llx", wwpn);
661 switch (d_id) { 614 port->sysfs_device.parent = &adapter->ccw_device->dev;
662 case ZFCP_DID_DIRECTORY_SERVICE:
663 bus_id = "directory";
664 break;
665 case ZFCP_DID_MANAGEMENT_SERVICE:
666 bus_id = "management";
667 break;
668 case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
669 bus_id = "key_distribution";
670 break;
671 case ZFCP_DID_ALIAS_SERVICE:
672 bus_id = "alias";
673 break;
674 case ZFCP_DID_TIME_SERVICE:
675 bus_id = "time";
676 break;
677 default:
678 kfree(port);
679 return ERR_PTR(-EINVAL);
680 }
681 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
682 port->sysfs_device.parent = &adapter->generic_services;
683 } else {
684 snprintf(port->sysfs_device.bus_id,
685 BUS_ID_SIZE, "0x%016llx", wwpn);
686 port->sysfs_device.parent = &adapter->ccw_device->dev;
687 }
688 615
689 port->sysfs_device.release = zfcp_sysfs_port_release; 616 port->sysfs_device.release = zfcp_sysfs_port_release;
690 dev_set_drvdata(&port->sysfs_device, port); 617 dev_set_drvdata(&port->sysfs_device, port);
@@ -700,12 +627,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
700 if (device_register(&port->sysfs_device)) 627 if (device_register(&port->sysfs_device))
701 goto err_out_free; 628 goto err_out_free;
702 629
703 if (status & ZFCP_STATUS_PORT_WKA) 630 retval = sysfs_create_group(&port->sysfs_device.kobj,
704 retval = sysfs_create_group(&port->sysfs_device.kobj, 631 &zfcp_sysfs_port_attrs);
705 &zfcp_sysfs_ns_port_attrs);
706 else
707 retval = sysfs_create_group(&port->sysfs_device.kobj,
708 &zfcp_sysfs_port_attrs);
709 632
710 if (retval) { 633 if (retval) {
711 device_unregister(&port->sysfs_device); 634 device_unregister(&port->sysfs_device);
@@ -718,10 +641,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
718 list_add_tail(&port->list, &adapter->port_list_head); 641 list_add_tail(&port->list, &adapter->port_list_head);
719 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 642 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
720 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status); 643 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
721 if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
722 if (!adapter->nameserver_port)
723 adapter->nameserver_port = port;
724 adapter->ports++;
725 644
726 write_unlock_irq(&zfcp_data.config_lock); 645 write_unlock_irq(&zfcp_data.config_lock);
727 646
@@ -740,21 +659,15 @@ err_out:
740 */ 659 */
741void zfcp_port_dequeue(struct zfcp_port *port) 660void zfcp_port_dequeue(struct zfcp_port *port)
742{ 661{
743 zfcp_port_wait(port); 662 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
744 write_lock_irq(&zfcp_data.config_lock); 663 write_lock_irq(&zfcp_data.config_lock);
745 list_del(&port->list); 664 list_del(&port->list);
746 port->adapter->ports--;
747 write_unlock_irq(&zfcp_data.config_lock); 665 write_unlock_irq(&zfcp_data.config_lock);
748 if (port->rport) 666 if (port->rport)
749 fc_remote_port_delete(port->rport); 667 fc_remote_port_delete(port->rport);
750 port->rport = NULL; 668 port->rport = NULL;
751 zfcp_adapter_put(port->adapter); 669 zfcp_adapter_put(port->adapter);
752 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA) 670 sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
753 sysfs_remove_group(&port->sysfs_device.kobj,
754 &zfcp_sysfs_ns_port_attrs);
755 else
756 sysfs_remove_group(&port->sysfs_device.kobj,
757 &zfcp_sysfs_port_attrs);
758 device_unregister(&port->sysfs_device); 671 device_unregister(&port->sysfs_device);
759} 672}
760 673
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 391dd29749f8..b04038c74786 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -25,7 +25,8 @@ static int zfcp_ccw_probe(struct ccw_device *ccw_device)
25 down(&zfcp_data.config_sema); 25 down(&zfcp_data.config_sema);
26 if (zfcp_adapter_enqueue(ccw_device)) { 26 if (zfcp_adapter_enqueue(ccw_device)) {
27 dev_err(&ccw_device->dev, 27 dev_err(&ccw_device->dev,
28 "Setup of data structures failed.\n"); 28 "Setting up data structures for the "
29 "FCP adapter failed\n");
29 retval = -EINVAL; 30 retval = -EINVAL;
30 } 31 }
31 up(&zfcp_data.config_sema); 32 up(&zfcp_data.config_sema);
@@ -46,6 +47,8 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
46 struct zfcp_adapter *adapter; 47 struct zfcp_adapter *adapter;
47 struct zfcp_port *port, *p; 48 struct zfcp_port *port, *p;
48 struct zfcp_unit *unit, *u; 49 struct zfcp_unit *unit, *u;
50 LIST_HEAD(unit_remove_lh);
51 LIST_HEAD(port_remove_lh);
49 52
50 ccw_device_set_offline(ccw_device); 53 ccw_device_set_offline(ccw_device);
51 down(&zfcp_data.config_sema); 54 down(&zfcp_data.config_sema);
@@ -54,26 +57,26 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
54 write_lock_irq(&zfcp_data.config_lock); 57 write_lock_irq(&zfcp_data.config_lock);
55 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { 58 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
56 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) { 59 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
57 list_move(&unit->list, &port->unit_remove_lh); 60 list_move(&unit->list, &unit_remove_lh);
58 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, 61 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
59 &unit->status); 62 &unit->status);
60 } 63 }
61 list_move(&port->list, &adapter->port_remove_lh); 64 list_move(&port->list, &port_remove_lh);
62 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 65 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
63 } 66 }
64 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 67 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
65 write_unlock_irq(&zfcp_data.config_lock); 68 write_unlock_irq(&zfcp_data.config_lock);
66 69
67 list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) { 70 list_for_each_entry_safe(port, p, &port_remove_lh, list) {
68 list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) { 71 list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
69 if (atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED, 72 if (atomic_read(&unit->status) &
70 &unit->status)) 73 ZFCP_STATUS_UNIT_REGISTERED)
71 scsi_remove_device(unit->device); 74 scsi_remove_device(unit->device);
72 zfcp_unit_dequeue(unit); 75 zfcp_unit_dequeue(unit);
73 } 76 }
74 zfcp_port_dequeue(port); 77 zfcp_port_dequeue(port);
75 } 78 }
76 zfcp_adapter_wait(adapter); 79 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
77 zfcp_adapter_dequeue(adapter); 80 zfcp_adapter_dequeue(adapter);
78 81
79 up(&zfcp_data.config_sema); 82 up(&zfcp_data.config_sema);
@@ -152,21 +155,22 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
152 */ 155 */
153static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) 156static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
154{ 157{
155 struct zfcp_adapter *adapter; 158 struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev);
156 159
157 down(&zfcp_data.config_sema);
158 adapter = dev_get_drvdata(&ccw_device->dev);
159 switch (event) { 160 switch (event) {
160 case CIO_GONE: 161 case CIO_GONE:
161 dev_warn(&adapter->ccw_device->dev, "device gone\n"); 162 dev_warn(&adapter->ccw_device->dev,
163 "The FCP device has been detached\n");
162 zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL); 164 zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
163 break; 165 break;
164 case CIO_NO_PATH: 166 case CIO_NO_PATH:
165 dev_warn(&adapter->ccw_device->dev, "no path\n"); 167 dev_warn(&adapter->ccw_device->dev,
168 "The CHPID for the FCP device is offline\n");
166 zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL); 169 zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
167 break; 170 break;
168 case CIO_OPER: 171 case CIO_OPER:
169 dev_info(&adapter->ccw_device->dev, "operational again\n"); 172 dev_info(&adapter->ccw_device->dev,
173 "The FCP device is operational again\n");
170 zfcp_erp_modify_adapter_status(adapter, 11, NULL, 174 zfcp_erp_modify_adapter_status(adapter, 11, NULL,
171 ZFCP_STATUS_COMMON_RUNNING, 175 ZFCP_STATUS_COMMON_RUNNING,
172 ZFCP_SET); 176 ZFCP_SET);
@@ -174,8 +178,6 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
174 89, NULL); 178 89, NULL);
175 break; 179 break;
176 } 180 }
177 zfcp_erp_wait(adapter);
178 up(&zfcp_data.config_sema);
179 return 1; 181 return 1;
180} 182}
181 183
@@ -224,3 +226,20 @@ int __init zfcp_ccw_register(void)
224{ 226{
225 return ccw_driver_register(&zfcp_ccw_driver); 227 return ccw_driver_register(&zfcp_ccw_driver);
226} 228}
229
230/**
231 * zfcp_get_adapter_by_busid - find zfcp_adapter struct
232 * @busid: bus id string of zfcp adapter to find
233 */
234struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
235{
236 struct ccw_device *ccw_device;
237 struct zfcp_adapter *adapter = NULL;
238
239 ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
240 if (ccw_device) {
241 adapter = dev_get_drvdata(&ccw_device->dev);
242 put_device(&ccw_device->dev);
243 }
244 return adapter;
245}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index fca48b88fc53..060f5f2352ec 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -318,6 +318,26 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
318 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 318 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
319} 319}
320 320
321/**
322 * zfcp_hba_dbf_event_berr - trace event for bit error threshold
323 * @adapter: adapter affected by this QDIO related event
324 * @req: fsf request
325 */
326void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter,
327 struct zfcp_fsf_req *req)
328{
329 struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
330 struct fsf_status_read_buffer *sr_buf = req->data;
331 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
332 unsigned long flags;
333
334 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
335 memset(r, 0, sizeof(*r));
336 strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
337 memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
338 debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
339 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
340}
321static void zfcp_hba_dbf_view_response(char **p, 341static void zfcp_hba_dbf_view_response(char **p,
322 struct zfcp_hba_dbf_record_response *r) 342 struct zfcp_hba_dbf_record_response *r)
323{ 343{
@@ -399,6 +419,30 @@ static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
399 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); 419 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
400} 420}
401 421
422static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
423{
424 zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
425 zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
426 zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count);
427 zfcp_dbf_out(p, "prim_seq_err", "%d",
428 r->primitive_sequence_error_count);
429 zfcp_dbf_out(p, "inval_trans_word_err", "%d",
430 r->invalid_transmission_word_error_count);
431 zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
432 zfcp_dbf_out(p, "prim_seq_event_to", "%d",
433 r->primitive_sequence_event_timeout_count);
434 zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
435 r->elastic_buffer_overrun_error_count);
436 zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
437 r->advertised_receive_b2b_credit);
438 zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
439 r->current_receive_b2b_credit);
440 zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
441 r->advertised_transmit_b2b_credit);
442 zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
443 r->current_transmit_b2b_credit);
444}
445
402static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, 446static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
403 char *out_buf, const char *in_buf) 447 char *out_buf, const char *in_buf)
404{ 448{
@@ -418,6 +462,8 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
418 zfcp_hba_dbf_view_status(&p, &r->u.status); 462 zfcp_hba_dbf_view_status(&p, &r->u.status);
419 else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) 463 else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
420 zfcp_hba_dbf_view_qdio(&p, &r->u.qdio); 464 zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
465 else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
466 zfcp_hba_dbf_view_berr(&p, &r->u.berr);
421 467
422 p += sprintf(p, "\n"); 468 p += sprintf(p, "\n");
423 return p - out_buf; 469 return p - out_buf;
@@ -519,14 +565,14 @@ static const char *zfcp_rec_dbf_ids[] = {
519 [75] = "physical port recovery escalation after failed port " 565 [75] = "physical port recovery escalation after failed port "
520 "recovery", 566 "recovery",
521 [76] = "port recovery escalation after failed unit recovery", 567 [76] = "port recovery escalation after failed unit recovery",
522 [77] = "recovery opening nameserver port",
568 [77] = "",
523 [78] = "duplicate request id", 569 [78] = "duplicate request id",
524 [79] = "link down", 570 [79] = "link down",
525 [80] = "exclusive read-only unit access unsupported", 571 [80] = "exclusive read-only unit access unsupported",
526 [81] = "shared read-write unit access unsupported", 572 [81] = "shared read-write unit access unsupported",
527 [82] = "incoming rscn", 573 [82] = "incoming rscn",
528 [83] = "incoming wwpn", 574 [83] = "incoming wwpn",
529 [84] = "",
575 [84] = "wka port handle not valid close port",
530 [85] = "online", 576 [85] = "online",
531 [86] = "offline", 577 [86] = "offline",
532 [87] = "ccw device gone", 578 [87] = "ccw device gone",
@@ -570,7 +616,7 @@ static const char *zfcp_rec_dbf_ids[] = {
570 [125] = "need newer zfcp", 616 [125] = "need newer zfcp",
571 [126] = "need newer microcode", 617 [126] = "need newer microcode",
572 [127] = "arbitrated loop not supported", 618 [127] = "arbitrated loop not supported",
573 [128] = "unknown topology",
619 [128] = "",
574 [129] = "qtcb size mismatch", 620 [129] = "qtcb size mismatch",
575 [130] = "unknown fsf status ecd", 621 [130] = "unknown fsf status ecd",
576 [131] = "fcp request too big", 622 [131] = "fcp request too big",
@@ -829,9 +875,9 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
829void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) 875void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
830{ 876{
831 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 877 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
832 struct zfcp_port *port = ct->port;
878 struct zfcp_wka_port *wka_port = ct->wka_port;
833 struct zfcp_adapter *adapter = port->adapter;
879 struct zfcp_adapter *adapter = wka_port->adapter;
834 struct ct_hdr *hdr = zfcp_sg_to_address(ct->req);
880 struct ct_hdr *hdr = sg_virt(ct->req);
835 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; 881 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
836 struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; 882 struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
837 unsigned long flags; 883 unsigned long flags;
@@ -842,7 +888,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
842 r->fsf_reqid = (unsigned long)fsf_req; 888 r->fsf_reqid = (unsigned long)fsf_req;
843 r->fsf_seqno = fsf_req->seq_no; 889 r->fsf_seqno = fsf_req->seq_no;
844 r->s_id = fc_host_port_id(adapter->scsi_host); 890 r->s_id = fc_host_port_id(adapter->scsi_host);
845 r->d_id = port->d_id;
891 r->d_id = wka_port->d_id;
846 oct->cmd_req_code = hdr->cmd_rsp_code; 892 oct->cmd_req_code = hdr->cmd_rsp_code;
847 oct->revision = hdr->revision; 893 oct->revision = hdr->revision;
848 oct->gs_type = hdr->gs_type; 894 oct->gs_type = hdr->gs_type;
@@ -863,9 +909,9 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
863void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) 909void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
864{ 910{
865 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 911 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
866 struct zfcp_port *port = ct->port;
912 struct zfcp_wka_port *wka_port = ct->wka_port;
867 struct zfcp_adapter *adapter = port->adapter;
913 struct zfcp_adapter *adapter = wka_port->adapter;
868 struct ct_hdr *hdr = zfcp_sg_to_address(ct->resp);
914 struct ct_hdr *hdr = sg_virt(ct->resp);
869 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; 915 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
870 struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; 916 struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
871 unsigned long flags; 917 unsigned long flags;
@@ -875,7 +921,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
875 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); 921 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
876 r->fsf_reqid = (unsigned long)fsf_req; 922 r->fsf_reqid = (unsigned long)fsf_req;
877 r->fsf_seqno = fsf_req->seq_no; 923 r->fsf_seqno = fsf_req->seq_no;
878 r->s_id = port->d_id;
924 r->s_id = wka_port->d_id;
879 r->d_id = fc_host_port_id(adapter->scsi_host); 925 r->d_id = fc_host_port_id(adapter->scsi_host);
880 rct->cmd_rsp_code = hdr->cmd_rsp_code; 926 rct->cmd_rsp_code = hdr->cmd_rsp_code;
881 rct->revision = hdr->revision; 927 rct->revision = hdr->revision;
@@ -922,8 +968,8 @@ void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
922 968
923 zfcp_san_dbf_event_els("oels", 2, fsf_req, 969 zfcp_san_dbf_event_els("oels", 2, fsf_req,
924 fc_host_port_id(els->adapter->scsi_host), 970 fc_host_port_id(els->adapter->scsi_host),
925 els->d_id, *(u8 *) zfcp_sg_to_address(els->req),
971 els->d_id, *(u8 *) sg_virt(els->req),
926 zfcp_sg_to_address(els->req), els->req->length);
972 sg_virt(els->req), els->req->length);
927} 973}
928 974
929/** 975/**
@@ -936,8 +982,7 @@ void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
936 982
937 zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id, 983 zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id,
938 fc_host_port_id(els->adapter->scsi_host), 984 fc_host_port_id(els->adapter->scsi_host),
939 *(u8 *)zfcp_sg_to_address(els->req),
985 *(u8 *)sg_virt(els->req), sg_virt(els->resp),
940 zfcp_sg_to_address(els->resp),
941 els->resp->length); 986 els->resp->length);
942} 987}
943 988
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 0ddb18449d11..e8f450801fea 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -151,6 +151,7 @@ struct zfcp_hba_dbf_record {
151 struct zfcp_hba_dbf_record_response response; 151 struct zfcp_hba_dbf_record_response response;
152 struct zfcp_hba_dbf_record_status status; 152 struct zfcp_hba_dbf_record_status status;
153 struct zfcp_hba_dbf_record_qdio qdio; 153 struct zfcp_hba_dbf_record_qdio qdio;
154 struct fsf_bit_error_payload berr;
154 } u; 155 } u;
155} __attribute__ ((packed)); 156} __attribute__ ((packed));
156 157
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 67f45fc62f53..8a13071c444c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -39,29 +39,6 @@
39 39
40/********************* GENERAL DEFINES *********************************/ 40/********************* GENERAL DEFINES *********************************/
41 41
42/**
43 * zfcp_sg_to_address - determine kernel address from struct scatterlist
44 * @list: struct scatterlist
45 * Return: kernel address
46 */
47static inline void *
48zfcp_sg_to_address(struct scatterlist *list)
49{
50 return sg_virt(list);
51}
52
53/**
54 * zfcp_address_to_sg - set up struct scatterlist from kernel address
55 * @address: kernel address
56 * @list: struct scatterlist
57 * @size: buffer size
58 */
59static inline void
60zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
61{
62 sg_set_buf(list, address, size);
63}
64
65#define REQUEST_LIST_SIZE 128 42#define REQUEST_LIST_SIZE 128
66 43
67/********************* SCSI SPECIFIC DEFINES *********************************/ 44/********************* SCSI SPECIFIC DEFINES *********************************/
@@ -101,11 +78,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
101 78
102/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 79/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
103 80
104typedef unsigned long long wwn_t;
105typedef unsigned long long fcp_lun_t;
106/* data length field may be at variable position in FCP-2 FCP_CMND IU */
107typedef unsigned int fcp_dl_t;
108
109/* timeout for name-server lookup (in seconds) */ 81/* timeout for name-server lookup (in seconds) */
110#define ZFCP_NS_GID_PN_TIMEOUT 10 82#define ZFCP_NS_GID_PN_TIMEOUT 10
111 83
@@ -129,7 +101,7 @@ typedef unsigned int fcp_dl_t;
129 101
130/* FCP(-2) FCP_CMND IU */ 102/* FCP(-2) FCP_CMND IU */
131struct fcp_cmnd_iu { 103struct fcp_cmnd_iu {
132 fcp_lun_t fcp_lun; /* FCP logical unit number */
104 u64 fcp_lun; /* FCP logical unit number */
133 u8 crn; /* command reference number */ 105 u8 crn; /* command reference number */
134 u8 reserved0:5; /* reserved */ 106 u8 reserved0:5; /* reserved */
135 u8 task_attribute:3; /* task attribute */ 107 u8 task_attribute:3; /* task attribute */
@@ -204,7 +176,7 @@ struct fcp_rscn_element {
204struct fcp_logo { 176struct fcp_logo {
205 u32 command; 177 u32 command;
206 u32 nport_did; 178 u32 nport_did;
207 wwn_t nport_wwpn;
179 u64 nport_wwpn;
208} __attribute__((packed)); 180} __attribute__((packed));
209 181
210/* 182/*
@@ -218,13 +190,6 @@ struct fcp_logo {
218#define ZFCP_LS_RSCN 0x61 190#define ZFCP_LS_RSCN 0x61
219#define ZFCP_LS_RNID 0x78 191#define ZFCP_LS_RNID 0x78
220 192
221struct zfcp_ls_rjt_par {
222 u8 action;
223 u8 reason_code;
224 u8 reason_expl;
225 u8 vendor_unique;
226} __attribute__ ((packed));
227
228struct zfcp_ls_adisc { 193struct zfcp_ls_adisc {
229 u8 code; 194 u8 code;
230 u8 field[3]; 195 u8 field[3];
@@ -234,20 +199,6 @@ struct zfcp_ls_adisc {
234 u32 nport_id; 199 u32 nport_id;
235} __attribute__ ((packed)); 200} __attribute__ ((packed));
236 201
237struct zfcp_ls_adisc_acc {
238 u8 code;
239 u8 field[3];
240 u32 hard_nport_id;
241 u64 wwpn;
242 u64 wwnn;
243 u32 nport_id;
244} __attribute__ ((packed));
245
246struct zfcp_rc_entry {
247 u8 code;
248 const char *description;
249};
250
251/* 202/*
252 * FC-GS-2 stuff 203 * FC-GS-2 stuff
253 */ 204 */
@@ -281,9 +232,7 @@ struct zfcp_rc_entry {
281#define ZFCP_STATUS_COMMON_RUNNING 0x40000000 232#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
282#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000 233#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
283#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000 234#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
284#define ZFCP_STATUS_COMMON_OPENING 0x08000000
285#define ZFCP_STATUS_COMMON_OPEN 0x04000000 235#define ZFCP_STATUS_COMMON_OPEN 0x04000000
286#define ZFCP_STATUS_COMMON_CLOSING 0x02000000
287#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000 236#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
288#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000 237#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
289#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000 238#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000
@@ -291,16 +240,15 @@ struct zfcp_rc_entry {
291 240
292/* adapter status */ 241/* adapter status */
293#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 242#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
294#define ZFCP_STATUS_ADAPTER_REGISTERED 0x00000004
295#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 243#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
296#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 244#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
297#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020 245#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
298#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 246#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
299#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 247#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
300#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 248#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
301#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
302 249
303/* FC-PH/FC-GS well-known address identifiers for generic services */ 250/* FC-PH/FC-GS well-known address identifiers for generic services */
251#define ZFCP_DID_WKA 0xFFFFF0
304#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA 252#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
305#define ZFCP_DID_TIME_SERVICE 0xFFFFFB 253#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
306#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC 254#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
@@ -312,29 +260,27 @@ struct zfcp_rc_entry {
312#define ZFCP_STATUS_PORT_DID_DID 0x00000002 260#define ZFCP_STATUS_PORT_DID_DID 0x00000002
313#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004 261#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
314#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008 262#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
315#define ZFCP_STATUS_PORT_NO_SCSI_ID 0x00000010
316#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020 263#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
317 264
318 /* for ports with well known addresses */
265 /* well known address (WKA) port status*/
319 #define ZFCP_STATUS_PORT_WKA \
266 enum zfcp_wka_status {
320 (ZFCP_STATUS_PORT_NO_WWPN | \
267 ZFCP_WKA_PORT_OFFLINE,
321 ZFCP_STATUS_PORT_NO_SCSI_ID)
268 ZFCP_WKA_PORT_CLOSING,
269 ZFCP_WKA_PORT_OPENING,
270 ZFCP_WKA_PORT_ONLINE,
271};
322 272
323/* logical unit status */ 273/* logical unit status */
324#define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002
325#define ZFCP_STATUS_UNIT_SHARED 0x00000004 274#define ZFCP_STATUS_UNIT_SHARED 0x00000004
326#define ZFCP_STATUS_UNIT_READONLY 0x00000008 275#define ZFCP_STATUS_UNIT_READONLY 0x00000008
327#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010 276#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
328#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020 277#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
329 278
330/* FSF request status (this does not have a common part) */ 279/* FSF request status (this does not have a common part) */
331#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
332#define ZFCP_STATUS_FSFREQ_POOL 0x00000001
333#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 280#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
334#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004 281#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
335#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 282#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
336#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 283#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
337#define ZFCP_STATUS_FSFREQ_ABORTING 0x00000020
338#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 284#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
339#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080 285#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
340#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100 286#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
@@ -379,7 +325,7 @@ struct ct_hdr {
379 * a port name is required */ 325 * a port name is required */
380struct ct_iu_gid_pn_req { 326struct ct_iu_gid_pn_req {
381 struct ct_hdr header; 327 struct ct_hdr header;
382 wwn_t wwpn;
328 u64 wwpn;
383} __attribute__ ((packed)); 329} __attribute__ ((packed));
384 330
385/* FS_ACC IU and data unit for GID_PN nameserver request */ 331/* FS_ACC IU and data unit for GID_PN nameserver request */
@@ -388,11 +334,9 @@ struct ct_iu_gid_pn_resp {
388 u32 d_id; 334 u32 d_id;
389} __attribute__ ((packed)); 335} __attribute__ ((packed));
390 336
391typedef void (*zfcp_send_ct_handler_t)(unsigned long);
392
393/** 337/**
394 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct 338 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
395 * @port: port where the request is sent to
339 * @wka_port: port where the request is sent to
396 * @req: scatter-gather list for request 340 * @req: scatter-gather list for request
397 * @resp: scatter-gather list for response 341 * @resp: scatter-gather list for response
398 * @req_count: number of elements in request scatter-gather list 342 * @req_count: number of elements in request scatter-gather list
@@ -404,12 +348,12 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
404 * @status: used to pass error status to calling function 348 * @status: used to pass error status to calling function
405 */ 349 */
406struct zfcp_send_ct { 350struct zfcp_send_ct {
407 struct zfcp_port *port;
351 struct zfcp_wka_port *wka_port;
408 struct scatterlist *req; 352 struct scatterlist *req;
409 struct scatterlist *resp; 353 struct scatterlist *resp;
410 unsigned int req_count; 354 unsigned int req_count;
411 unsigned int resp_count; 355 unsigned int resp_count;
412 zfcp_send_ct_handler_t handler;
356 void (*handler)(unsigned long);
413 unsigned long handler_data; 357 unsigned long handler_data;
414 int timeout; 358 int timeout;
415 struct completion *completion; 359 struct completion *completion;
@@ -426,8 +370,6 @@ struct zfcp_gid_pn_data {
426 struct zfcp_port *port; 370 struct zfcp_port *port;
427}; 371};
428 372
429typedef void (*zfcp_send_els_handler_t)(unsigned long);
430
431/** 373/**
432 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els 374 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
433 * @adapter: adapter where request is sent from 375 * @adapter: adapter where request is sent from
@@ -451,22 +393,28 @@ struct zfcp_send_els {
451 struct scatterlist *resp; 393 struct scatterlist *resp;
452 unsigned int req_count; 394 unsigned int req_count;
453 unsigned int resp_count; 395 unsigned int resp_count;
454 zfcp_send_els_handler_t handler;
396 void (*handler)(unsigned long);
455 unsigned long handler_data; 397 unsigned long handler_data;
456 struct completion *completion; 398 struct completion *completion;
457 int ls_code; 399 int ls_code;
458 int status; 400 int status;
459}; 401};
460 402
403struct zfcp_wka_port {
404 struct zfcp_adapter *adapter;
405 wait_queue_head_t completion_wq;
406 enum zfcp_wka_status status;
407 atomic_t refcount;
408 u32 d_id;
409 u32 handle;
410 struct mutex mutex;
411 struct delayed_work work;
412};
413
461struct zfcp_qdio_queue { 414struct zfcp_qdio_queue {
462 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
415 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
463 u8 first; /* index of next free bfr
416 u8 first; /* index of next free bfr in queue */
464 in queue (free_count>0) */
417 atomic_t count; /* number of free buffers in queue */
465 atomic_t count; /* number of free buffers
466 in queue */
467 spinlock_t lock; /* lock for operations on queue */
468 int pci_batch; /* SBALs since PCI indication
469 was last set */
470}; 418};
471 419
472struct zfcp_erp_action { 420struct zfcp_erp_action {
@@ -475,7 +423,7 @@ struct zfcp_erp_action {
475 struct zfcp_adapter *adapter; /* device which should be recovered */ 423 struct zfcp_adapter *adapter; /* device which should be recovered */
476 struct zfcp_port *port; 424 struct zfcp_port *port;
477 struct zfcp_unit *unit; 425 struct zfcp_unit *unit;
478 volatile u32 status; /* recovery status */
426 u32 status; /* recovery status */
479 u32 step; /* active step of this erp action */ 427 u32 step; /* active step of this erp action */
480 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending 428 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
481 for this action */ 429 for this action */
@@ -506,8 +454,8 @@ struct zfcp_adapter {
506 atomic_t refcount; /* reference count */ 454 atomic_t refcount; /* reference count */
507 wait_queue_head_t remove_wq; /* can be used to wait for 455 wait_queue_head_t remove_wq; /* can be used to wait for
508 refcount drop to zero */ 456 refcount drop to zero */
509 wwn_t peer_wwnn; /* P2P peer WWNN */
457 u64 peer_wwnn; /* P2P peer WWNN */
510 wwn_t peer_wwpn; /* P2P peer WWPN */
458 u64 peer_wwpn; /* P2P peer WWPN */
511 u32 peer_d_id; /* P2P peer D_ID */ 459 u32 peer_d_id; /* P2P peer D_ID */
512 struct ccw_device *ccw_device; /* S/390 ccw device */ 460 struct ccw_device *ccw_device; /* S/390 ccw device */
513 u32 hydra_version; /* Hydra version */ 461 u32 hydra_version; /* Hydra version */
@@ -518,13 +466,13 @@ struct zfcp_adapter {
518 u16 timer_ticks; /* time int for a tick */ 466 u16 timer_ticks; /* time int for a tick */
519 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 467 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
520 struct list_head port_list_head; /* remote port list */ 468 struct list_head port_list_head; /* remote port list */
521 struct list_head port_remove_lh; /* head of ports to be
522 removed */
523 u32 ports; /* number of remote ports */
524 unsigned long req_no; /* unique FSF req number */ 469 unsigned long req_no; /* unique FSF req number */
525 struct list_head *req_list; /* list of pending reqs */ 470 struct list_head *req_list; /* list of pending reqs */
526 spinlock_t req_list_lock; /* request list lock */ 471 spinlock_t req_list_lock; /* request list lock */
527 struct zfcp_qdio_queue req_q; /* request queue */ 472 struct zfcp_qdio_queue req_q; /* request queue */
473 spinlock_t req_q_lock; /* for operations on queue */
474 int req_q_pci_batch; /* SBALs since PCI indication
475 was last set */
528 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 476 u32 fsf_req_seq_no; /* FSF cmnd seq number */
529 wait_queue_head_t request_wq; /* can be used to wait for 477 wait_queue_head_t request_wq; /* can be used to wait for
530 more avaliable SBALs */ 478 more avaliable SBALs */
@@ -548,7 +496,7 @@ struct zfcp_adapter {
548 actions */ 496 actions */
549 u32 erp_low_mem_count; /* nr of erp actions waiting 497 u32 erp_low_mem_count; /* nr of erp actions waiting
550 for memory */ 498 for memory */
551 struct zfcp_port *nameserver_port; /* adapter's nameserver */
499 struct zfcp_wka_port nsp; /* adapter's nameserver */
552 debug_info_t *rec_dbf; 500 debug_info_t *rec_dbf;
553 debug_info_t *hba_dbf; 501 debug_info_t *hba_dbf;
554 debug_info_t *san_dbf; /* debug feature areas */ 502 debug_info_t *san_dbf; /* debug feature areas */
@@ -563,11 +511,11 @@ struct zfcp_adapter {
563 struct zfcp_scsi_dbf_record scsi_dbf_buf; 511 struct zfcp_scsi_dbf_record scsi_dbf_buf;
564 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 512 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
565 struct qdio_initialize qdio_init_data; /* for qdio_establish */ 513 struct qdio_initialize qdio_init_data; /* for qdio_establish */
566 struct device generic_services; /* directory for WKA ports */
567 struct fc_host_statistics *fc_stats; 514 struct fc_host_statistics *fc_stats;
568 struct fsf_qtcb_bottom_port *stats_reset_data; 515 struct fsf_qtcb_bottom_port *stats_reset_data;
569 unsigned long stats_reset; 516 unsigned long stats_reset;
570 struct work_struct scan_work; 517 struct work_struct scan_work;
518 atomic_t qdio_outb_full; /* queue full incidents */
571}; 519};
572 520
573struct zfcp_port { 521struct zfcp_port {
@@ -579,18 +527,16 @@ struct zfcp_port {
579 refcount drop to zero */ 527 refcount drop to zero */
580 struct zfcp_adapter *adapter; /* adapter used to access port */ 528 struct zfcp_adapter *adapter; /* adapter used to access port */
581 struct list_head unit_list_head; /* head of logical unit list */ 529 struct list_head unit_list_head; /* head of logical unit list */
582 struct list_head unit_remove_lh; /* head of luns to be removed
583 list */
584 u32 units; /* # of logical units in list */
585 atomic_t status; /* status of this remote port */ 530 atomic_t status; /* status of this remote port */
586 wwn_t wwnn; /* WWNN if known */
531 u64 wwnn; /* WWNN if known */
587 wwn_t wwpn; /* WWPN */
532 u64 wwpn; /* WWPN */
588 u32 d_id; /* D_ID */ 533 u32 d_id; /* D_ID */
589 u32 handle; /* handle assigned by FSF */ 534 u32 handle; /* handle assigned by FSF */
590 struct zfcp_erp_action erp_action; /* pending error recovery */ 535 struct zfcp_erp_action erp_action; /* pending error recovery */
591 atomic_t erp_counter; 536 atomic_t erp_counter;
592 u32 maxframe_size; 537 u32 maxframe_size;
593 u32 supported_classes; 538 u32 supported_classes;
539 struct work_struct gid_pn_work;
594}; 540};
595 541
596struct zfcp_unit { 542struct zfcp_unit {
@@ -601,8 +547,7 @@ struct zfcp_unit {
601 refcount drop to zero */ 547 refcount drop to zero */
602 struct zfcp_port *port; /* remote port of unit */ 548 struct zfcp_port *port; /* remote port of unit */
603 atomic_t status; /* status of this logical unit */ 549 atomic_t status; /* status of this logical unit */
604 unsigned int scsi_lun; /* own SCSI LUN */
550 u64 fcp_lun; /* own FCP_LUN */
605 fcp_lun_t fcp_lun; /* own FCP_LUN */
606 u32 handle; /* handle assigned by FSF */ 551 u32 handle; /* handle assigned by FSF */
607 struct scsi_device *device; /* scsi device struct pointer */ 552 struct scsi_device *device; /* scsi device struct pointer */
608 struct zfcp_erp_action erp_action; /* pending error recovery */ 553 struct zfcp_erp_action erp_action; /* pending error recovery */
@@ -625,7 +570,7 @@ struct zfcp_fsf_req {
625 u8 sbal_response; /* SBAL used in interrupt */ 570 u8 sbal_response; /* SBAL used in interrupt */
626 wait_queue_head_t completion_wq; /* can be used by a routine 571 wait_queue_head_t completion_wq; /* can be used by a routine
627 to wait for completion */ 572 to wait for completion */
628 volatile u32 status; /* status of this request */
573 u32 status; /* status of this request */
629 u32 fsf_command; /* FSF Command copy */ 574 u32 fsf_command; /* FSF Command copy */
630 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 575 struct fsf_qtcb *qtcb; /* address of associated QTCB */
631 u32 seq_no; /* Sequence number of request */ 576 u32 seq_no; /* Sequence number of request */
@@ -644,23 +589,20 @@ struct zfcp_fsf_req {
644struct zfcp_data { 589struct zfcp_data {
645 struct scsi_host_template scsi_host_template; 590 struct scsi_host_template scsi_host_template;
646 struct scsi_transport_template *scsi_transport_template; 591 struct scsi_transport_template *scsi_transport_template;
647 atomic_t status; /* Module status flags */
648 struct list_head adapter_list_head; /* head of adapter list */ 592 struct list_head adapter_list_head; /* head of adapter list */
649 struct list_head adapter_remove_lh; /* head of adapters to be
650 removed */
651 u32 adapters; /* # of adapters in list */
652 rwlock_t config_lock; /* serialises changes 593 rwlock_t config_lock; /* serialises changes
653 to adapter/port/unit 594 to adapter/port/unit
654 lists */ 595 lists */
655 struct semaphore config_sema; /* serialises configuration 596 struct semaphore config_sema; /* serialises configuration
656 changes */ 597 changes */
657 atomic_t loglevel; /* current loglevel */ 598 atomic_t loglevel; /* current loglevel */
658 char init_busid[BUS_ID_SIZE];
599 char init_busid[20];
659 wwn_t init_wwpn;
600 u64 init_wwpn;
660 fcp_lun_t init_fcp_lun;
601 u64 init_fcp_lun;
661 struct kmem_cache *fsf_req_qtcb_cache; 602 struct kmem_cache *fsf_req_qtcb_cache;
662 struct kmem_cache *sr_buffer_cache; 603 struct kmem_cache *sr_buffer_cache;
663 struct kmem_cache *gid_pn_cache; 604 struct kmem_cache *gid_pn_cache;
605 struct workqueue_struct *work_queue;
664}; 606};
665 607
666/* struct used by memory pools for fsf_requests */ 608/* struct used by memory pools for fsf_requests */
@@ -677,14 +619,7 @@ struct zfcp_fsf_req_qtcb {
677#define ZFCP_SET 0x00000100 619#define ZFCP_SET 0x00000100
678#define ZFCP_CLEAR 0x00000200 620#define ZFCP_CLEAR 0x00000200
679 621
680 #ifndef atomic_test_mask
622 #define zfcp_get_busid_by_adapter(adapter) (dev_name(&adapter->ccw_device->dev))
681#define atomic_test_mask(mask, target) \
682 ((atomic_read(target) & mask) == mask)
683#endif
684
685#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
686#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
687#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
688 623
689/* 624/*
690 * Helper functions for request ID management. 625 * Helper functions for request ID management.
@@ -745,12 +680,6 @@ zfcp_unit_put(struct zfcp_unit *unit)
745} 680}
746 681
747static inline void 682static inline void
748zfcp_unit_wait(struct zfcp_unit *unit)
749{
750 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
751}
752
753static inline void
754zfcp_port_get(struct zfcp_port *port) 683zfcp_port_get(struct zfcp_port *port)
755{ 684{
756 atomic_inc(&port->refcount); 685 atomic_inc(&port->refcount);
@@ -764,12 +693,6 @@ zfcp_port_put(struct zfcp_port *port)
764} 693}
765 694
766static inline void 695static inline void
767zfcp_port_wait(struct zfcp_port *port)
768{
769 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
770}
771
772static inline void
773zfcp_adapter_get(struct zfcp_adapter *adapter) 696zfcp_adapter_get(struct zfcp_adapter *adapter)
774{ 697{
775 atomic_inc(&adapter->refcount); 698 atomic_inc(&adapter->refcount);
@@ -782,10 +705,4 @@ zfcp_adapter_put(struct zfcp_adapter *adapter)
782 wake_up(&adapter->remove_wq); 705 wake_up(&adapter->remove_wq);
783} 706}
784 707
785static inline void
786zfcp_adapter_wait(struct zfcp_adapter *adapter)
787{
788 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
789}
790
791#endif /* ZFCP_DEF_H */ 708#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 643ac4bba5b5..9040f738ff33 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -23,7 +23,6 @@ enum zfcp_erp_steps {
23 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, 23 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
24 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, 24 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
25 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, 25 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
26 ZFCP_ERP_STEP_NAMESERVER_OPEN = 0x0200,
27 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400, 26 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
28 ZFCP_ERP_STEP_PORT_OPENING = 0x0800, 27 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
29 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, 28 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
@@ -532,8 +531,7 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
532 struct zfcp_port *port; 531 struct zfcp_port *port;
533 532
534 list_for_each_entry(port, &adapter->port_list_head, list) 533 list_for_each_entry(port, &adapter->port_list_head, list)
535 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA))
534 _zfcp_erp_port_reopen(port, clear, id, ref);
536 _zfcp_erp_port_reopen(port, clear, id, ref);
537} 535}
538 536
539static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id, 537static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id,
@@ -669,8 +667,6 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
669 int ret; 667 int ret;
670 struct zfcp_adapter *adapter = act->adapter; 668 struct zfcp_adapter *adapter = act->adapter;
671 669
672 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
673
674 write_lock_irq(&adapter->erp_lock); 670 write_lock_irq(&adapter->erp_lock);
675 zfcp_erp_action_to_running(act); 671 zfcp_erp_action_to_running(act);
676 write_unlock_irq(&adapter->erp_lock); 672 write_unlock_irq(&adapter->erp_lock);
@@ -741,8 +737,7 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
741 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 737 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
742 failed_qdio: 738 failed_qdio:
743 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 739 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
744 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
740 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
745 ZFCP_STATUS_ADAPTER_XPORT_OK,
746 &act->adapter->status); 741 &act->adapter->status);
747 return retval; 742 return retval;
748} 743}
@@ -751,15 +746,11 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
751{ 746{
752 int retval; 747 int retval;
753 748
754 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
755 zfcp_erp_adapter_strategy_generic(act, 1); /* close */ 749 zfcp_erp_adapter_strategy_generic(act, 1); /* close */
756 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
757 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 750 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
758 return ZFCP_ERP_EXIT; 751 return ZFCP_ERP_EXIT;
759 752
760 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
761 retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */ 753 retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
762 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
763 754
764 if (retval == ZFCP_ERP_FAILED) 755 if (retval == ZFCP_ERP_FAILED)
765 ssleep(8); 756 ssleep(8);
@@ -783,10 +774,7 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
783 774
784static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) 775static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
785{ 776{
786 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
777 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
787 ZFCP_STATUS_COMMON_CLOSING |
788 ZFCP_STATUS_COMMON_ACCESS_DENIED |
789 ZFCP_STATUS_PORT_DID_DID |
790 ZFCP_STATUS_PORT_PHYS_CLOSING | 778 ZFCP_STATUS_PORT_PHYS_CLOSING |
791 ZFCP_STATUS_PORT_INVALID_WWPN, 779 ZFCP_STATUS_PORT_INVALID_WWPN,
792 &port->status); 780 &port->status);
@@ -839,73 +827,12 @@ static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
839 return ZFCP_ERP_CONTINUES; 827 return ZFCP_ERP_CONTINUES;
840} 828}
841 829
842static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act)
843{
844 unsigned long flags;
845 struct zfcp_adapter *adapter = ns_act->adapter;
846 struct zfcp_erp_action *act, *tmp;
847 int status;
848
849 read_lock_irqsave(&adapter->erp_lock, flags);
850 list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) {
851 if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
852 status = atomic_read(&adapter->nameserver_port->status);
853 if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
854 zfcp_erp_port_failed(act->port, 27, NULL);
855 zfcp_erp_action_ready(act);
856 }
857 }
858 read_unlock_irqrestore(&adapter->erp_lock, flags);
859}
860
861static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act)
862{
863 int retval;
864
865 switch (act->step) {
866 case ZFCP_ERP_STEP_UNINITIALIZED:
867 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
868 case ZFCP_ERP_STEP_PORT_CLOSING:
869 return zfcp_erp_port_strategy_open_port(act);
870
871 case ZFCP_ERP_STEP_PORT_OPENING:
872 if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN)
873 retval = ZFCP_ERP_SUCCEEDED;
874 else
875 retval = ZFCP_ERP_FAILED;
876 /* this is needed anyway */
877 zfcp_erp_port_strategy_open_ns_wake(act);
878 return retval;
879
880 default:
881 return ZFCP_ERP_FAILED;
882 }
883}
884
885static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act)
886{
887 int retval;
888
889 retval = zfcp_fc_ns_gid_pn_request(act);
890 if (retval == -ENOMEM)
891 return ZFCP_ERP_NOMEM;
892 act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
893 if (retval)
894 return ZFCP_ERP_FAILED;
895 return ZFCP_ERP_CONTINUES;
896}
897
898static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) 830static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
899{ 831{
900 struct zfcp_adapter *adapter = act->adapter; 832 struct zfcp_adapter *adapter = act->adapter;
901 struct zfcp_port *port = act->port; 833 struct zfcp_port *port = act->port;
902 834
903 if (port->wwpn != adapter->peer_wwpn) { 835 if (port->wwpn != adapter->peer_wwpn) {
904 dev_err(&adapter->ccw_device->dev,
905 "Failed to open port 0x%016Lx, "
906 "Peer WWPN 0x%016Lx does not "
907 "match.\n", port->wwpn,
908 adapter->peer_wwpn);
909 zfcp_erp_port_failed(port, 25, NULL); 836 zfcp_erp_port_failed(port, 25, NULL);
910 return ZFCP_ERP_FAILED; 837 return ZFCP_ERP_FAILED;
911 } 838 }
@@ -914,11 +841,25 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
914 return zfcp_erp_port_strategy_open_port(act); 841 return zfcp_erp_port_strategy_open_port(act);
915} 842}
916 843
844void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
845{
846 int retval;
847 struct zfcp_port *port = container_of(work, struct zfcp_port,
848 gid_pn_work);
849
850 retval = zfcp_fc_ns_gid_pn(&port->erp_action);
851 if (retval == -ENOMEM)
852 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM);
853 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
854 if (retval)
855 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED);
856
857}
858
917static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) 859static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
918{ 860{
919 struct zfcp_adapter *adapter = act->adapter; 861 struct zfcp_adapter *adapter = act->adapter;
920 struct zfcp_port *port = act->port; 862 struct zfcp_port *port = act->port;
921 struct zfcp_port *ns_port = adapter->nameserver_port;
922 int p_status = atomic_read(&port->status); 863 int p_status = atomic_read(&port->status);
923 864
924 switch (act->step) { 865 switch (act->step) {
@@ -927,28 +868,10 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
927 case ZFCP_ERP_STEP_PORT_CLOSING: 868 case ZFCP_ERP_STEP_PORT_CLOSING:
928 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) 869 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
929 return zfcp_erp_open_ptp_port(act); 870 return zfcp_erp_open_ptp_port(act);
930 if (!ns_port) {
871 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
931 dev_err(&adapter->ccw_device->dev,
872 queue_work(zfcp_data.work_queue, &port->gid_pn_work);
932 "Nameserver port unavailable.\n");
873 return ZFCP_ERP_CONTINUES;
933 return ZFCP_ERP_FAILED;
934 }
935 if (!(atomic_read(&ns_port->status) &
936 ZFCP_STATUS_COMMON_UNBLOCKED)) {
937 /* nameserver port may live again */
938 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
939 &ns_port->status);
940 if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) {
941 act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
942 return ZFCP_ERP_CONTINUES;
943 }
944 return ZFCP_ERP_FAILED;
945 } 874 }
946 /* else nameserver port is already open, fall through */
947 case ZFCP_ERP_STEP_NAMESERVER_OPEN:
948 if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN))
949 return ZFCP_ERP_FAILED;
950 return zfcp_erp_port_strategy_open_lookup(act);
951
952 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 875 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
953 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) { 876 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
954 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) { 877 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
@@ -961,25 +884,26 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
961 884
962 case ZFCP_ERP_STEP_PORT_OPENING: 885 case ZFCP_ERP_STEP_PORT_OPENING:
963 /* D_ID might have changed during open */ 886 /* D_ID might have changed during open */
964 if ((p_status & ZFCP_STATUS_COMMON_OPEN) &&
887 if (p_status & ZFCP_STATUS_COMMON_OPEN) {
965 (p_status & ZFCP_STATUS_PORT_DID_DID))
888 if (p_status & ZFCP_STATUS_PORT_DID_DID)
966 return ZFCP_ERP_SUCCEEDED; 889 return ZFCP_ERP_SUCCEEDED;
890 else {
891 act->step = ZFCP_ERP_STEP_PORT_CLOSING;
892 return ZFCP_ERP_CONTINUES;
893 }
967 /* fall through otherwise */ 894 /* fall through otherwise */
895 }
968 } 896 }
969 return ZFCP_ERP_FAILED; 897 return ZFCP_ERP_FAILED;
970} 898}
971 899
972static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act)
973{
974 if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA))
975 return zfcp_erp_port_strategy_open_nameserver(act);
976 return zfcp_erp_port_strategy_open_common(act);
977}
978
979static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) 900static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
980{ 901{
981 struct zfcp_port *port = erp_action->port; 902 struct zfcp_port *port = erp_action->port;
982 903
904 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)
905 goto close_init_done;
906
983 switch (erp_action->step) { 907 switch (erp_action->step) {
984 case ZFCP_ERP_STEP_UNINITIALIZED: 908 case ZFCP_ERP_STEP_UNINITIALIZED:
985 zfcp_erp_port_strategy_clearstati(port); 909 zfcp_erp_port_strategy_clearstati(port);
@@ -992,19 +916,17 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
992 return ZFCP_ERP_FAILED; 916 return ZFCP_ERP_FAILED;
993 break; 917 break;
994 } 918 }
919
920close_init_done:
995 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 921 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
996 return ZFCP_ERP_EXIT; 922 return ZFCP_ERP_EXIT;
997 else
998 return zfcp_erp_port_strategy_open(erp_action);
999 923
1000 return ZFCP_ERP_FAILED;
924 return zfcp_erp_port_strategy_open_common(erp_action);
1001} 925}
1002 926
1003static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) 927static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
1004{ 928{
1005 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
929 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1006 ZFCP_STATUS_COMMON_CLOSING |
1007 ZFCP_STATUS_COMMON_ACCESS_DENIED |
1008 ZFCP_STATUS_UNIT_SHARED | 930 ZFCP_STATUS_UNIT_SHARED |
1009 ZFCP_STATUS_UNIT_READONLY, 931 ZFCP_STATUS_UNIT_READONLY,
1010 &unit->status); 932 &unit->status);
@@ -1065,8 +987,14 @@ static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
1065 break; 987 break;
1066 case ZFCP_ERP_FAILED : 988 case ZFCP_ERP_FAILED :
1067 atomic_inc(&unit->erp_counter); 989 atomic_inc(&unit->erp_counter);
1068 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
990 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) {
991 dev_err(&unit->port->adapter->ccw_device->dev,
992 "ERP failed for unit 0x%016Lx on "
993 "port 0x%016Lx\n",
994 (unsigned long long)unit->fcp_lun,
995 (unsigned long long)unit->port->wwpn);
1069 zfcp_erp_unit_failed(unit, 21, NULL); 996 zfcp_erp_unit_failed(unit, 21, NULL);
997 }
1070 break; 998 break;
1071 } 999 }
1072 1000
@@ -1091,8 +1019,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1091 result = ZFCP_ERP_EXIT; 1019 result = ZFCP_ERP_EXIT;
1092 } 1020 }
1093 atomic_inc(&port->erp_counter); 1021 atomic_inc(&port->erp_counter);
1094 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
1022 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
1023 dev_err(&port->adapter->ccw_device->dev,
1024 "ERP failed for remote port 0x%016Lx\n",
1025 (unsigned long long)port->wwpn);
1095 zfcp_erp_port_failed(port, 22, NULL); 1026 zfcp_erp_port_failed(port, 22, NULL);
1027 }
1096 break; 1028 break;
1097 } 1029 }
1098 1030
@@ -1114,8 +1046,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1114 1046
1115 case ZFCP_ERP_FAILED : 1047 case ZFCP_ERP_FAILED :
1116 atomic_inc(&adapter->erp_counter); 1048 atomic_inc(&adapter->erp_counter);
1117 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
1049 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
1050 dev_err(&adapter->ccw_device->dev,
1051 "ERP cannot recover an error "
1052 "on the FCP device\n");
1118 zfcp_erp_adapter_failed(adapter, 23, NULL); 1053 zfcp_erp_adapter_failed(adapter, 23, NULL);
1054 }
1119 break; 1055 break;
1120 } 1056 }
1121 1057
@@ -1250,9 +1186,10 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
1250 struct zfcp_unit *unit = p->unit; 1186 struct zfcp_unit *unit = p->unit;
1251 struct fc_rport *rport = unit->port->rport; 1187 struct fc_rport *rport = unit->port->rport;
1252 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, 1188 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1253 unit->scsi_lun, 0);
1189 scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
1254 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1190 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1255 zfcp_unit_put(unit); 1191 zfcp_unit_put(unit);
1192 wake_up(&unit->port->adapter->erp_done_wqh);
1256 kfree(p); 1193 kfree(p);
1257} 1194}
1258 1195
@@ -1263,9 +1200,9 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1263 p = kzalloc(sizeof(*p), GFP_KERNEL); 1200 p = kzalloc(sizeof(*p), GFP_KERNEL);
1264 if (!p) { 1201 if (!p) {
1265 dev_err(&unit->port->adapter->ccw_device->dev, 1202 dev_err(&unit->port->adapter->ccw_device->dev,
1266 "Out of resources. Could not register unit "
1203 "Registering unit 0x%016Lx on port 0x%016Lx failed\n",
1267 "0x%016Lx on port 0x%016Lx with SCSI stack.\n",
1204 (unsigned long long)unit->fcp_lun,
1268 unit->fcp_lun, unit->port->wwpn);
1205 (unsigned long long)unit->port->wwpn);
1269 return; 1206 return;
1270 } 1207 }
1271 1208
@@ -1273,7 +1210,7 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1273 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1210 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1274 INIT_WORK(&p->work, zfcp_erp_scsi_scan); 1211 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1275 p->unit = unit; 1212 p->unit = unit;
1276 schedule_work(&p->work);
1213 queue_work(zfcp_data.work_queue, &p->work);
1277} 1214}
1278 1215
1279static void zfcp_erp_rport_register(struct zfcp_port *port) 1216static void zfcp_erp_rport_register(struct zfcp_port *port)
@@ -1286,8 +1223,8 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
1286 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); 1223 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
1287 if (!port->rport) { 1224 if (!port->rport) {
1288 dev_err(&port->adapter->ccw_device->dev, 1225 dev_err(&port->adapter->ccw_device->dev,
1289 "Failed registration of rport "
1226 "Registering port 0x%016Lx failed\n",
1290 "0x%016Lx.\n", port->wwpn);
1227 (unsigned long long)port->wwpn);
1291 return; 1228 return;
1292 } 1229 }
1293 1230
@@ -1299,12 +1236,12 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
1299static void zfcp_erp_rports_del(struct zfcp_adapter *adapter) 1236static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
1300{ 1237{
1301 struct zfcp_port *port; 1238 struct zfcp_port *port;
1302 list_for_each_entry(port, &adapter->port_list_head, list)
1239 list_for_each_entry(port, &adapter->port_list_head, list) {
1303 if (port->rport && !(atomic_read(&port->status) &
1240 if (!port->rport)
1304 ZFCP_STATUS_PORT_WKA)) {
1241 continue;
1305 fc_remote_port_delete(port->rport); 1242 fc_remote_port_delete(port->rport);
1306 port->rport = NULL; 1243 port->rport = NULL;
1307 } 1244 }
1308} 1245}
1309 1246
1310static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) 1247static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
@@ -1439,7 +1376,7 @@ static int zfcp_erp_thread(void *data)
1439 struct zfcp_erp_action *act; 1376 struct zfcp_erp_action *act;
1440 unsigned long flags; 1377 unsigned long flags;
1441 1378
1442 daemonize("zfcperp%s", adapter->ccw_device->dev.bus_id);
1379 daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
1443 /* Block all signals */ 1380 /* Block all signals */
1444 siginitsetinv(&current->blocked, 0); 1381 siginitsetinv(&current->blocked, 0);
1445 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); 1382 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@@ -1459,9 +1396,9 @@ static int zfcp_erp_thread(void *data)
1459 zfcp_erp_wakeup(adapter); 1396 zfcp_erp_wakeup(adapter);
1460 } 1397 }
1461 1398
1462 zfcp_rec_dbf_event_thread(4, adapter);
1399 zfcp_rec_dbf_event_thread_lock(4, adapter);
1463 down_interruptible(&adapter->erp_ready_sem); 1400 down_interruptible(&adapter->erp_ready_sem);
1464 zfcp_rec_dbf_event_thread(5, adapter);
1401 zfcp_rec_dbf_event_thread_lock(5, adapter);
1465 } 1402 }
1466 1403
1467 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); 1404 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@@ -1484,7 +1421,7 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1484 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); 1421 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
1485 if (retval < 0) { 1422 if (retval < 0) {
1486 dev_err(&adapter->ccw_device->dev, 1423 dev_err(&adapter->ccw_device->dev,
1487 "Creation of ERP thread failed.\n");
1424 "Creating an ERP thread for the FCP device failed.\n");
1488 return retval; 1425 return retval;
1489 } 1426 }
1490 wait_event(adapter->erp_thread_wqh, 1427 wait_event(adapter->erp_thread_wqh,
@@ -1506,7 +1443,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1506{ 1443{
1507 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); 1444 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
1508 up(&adapter->erp_ready_sem); 1445 up(&adapter->erp_ready_sem);
1509 zfcp_rec_dbf_event_thread_lock(2, adapter);
1446 zfcp_rec_dbf_event_thread_lock(3, adapter);
1510 1447
1511 wait_event(adapter->erp_thread_wqh, 1448 wait_event(adapter->erp_thread_wqh,
1512 !(atomic_read(&adapter->status) & 1449 !(atomic_read(&adapter->status) &
@@ -1526,7 +1463,6 @@ void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
1526{ 1463{
1527 zfcp_erp_modify_adapter_status(adapter, id, ref, 1464 zfcp_erp_modify_adapter_status(adapter, id, ref,
1528 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1465 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1529 dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n");
1530} 1466}
1531 1467
1532/** 1468/**
@@ -1539,15 +1475,6 @@ void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
1539{ 1475{
1540 zfcp_erp_modify_port_status(port, id, ref, 1476 zfcp_erp_modify_port_status(port, id, ref,
1541 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1477 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1542
1543 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
1544 dev_err(&port->adapter->ccw_device->dev,
1545 "Port ERP failed for WKA port d_id=0x%06x.\n",
1546 port->d_id);
1547 else
1548 dev_err(&port->adapter->ccw_device->dev,
1549 "Port ERP failed for port wwpn=0x%016Lx.\n",
1550 port->wwpn);
1551} 1478}
1552 1479
1553/** 1480/**
@@ -1560,10 +1487,6 @@ void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
1560{ 1487{
1561 zfcp_erp_modify_unit_status(unit, id, ref, 1488 zfcp_erp_modify_unit_status(unit, id, ref,
1562 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1489 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1563
1564 dev_err(&unit->port->adapter->ccw_device->dev,
1565 "Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n",
1566 unit->fcp_lun, unit->port->wwpn);
1567} 1490}
1568 1491
1569/** 1492/**
@@ -1754,9 +1677,8 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
1754 1677
1755 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | 1678 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1756 ZFCP_STATUS_COMMON_ACCESS_BOXED))) { 1679 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1757 if (!(status & ZFCP_STATUS_PORT_WKA))
1680 list_for_each_entry(unit, &port->unit_list_head, list)
1758 list_for_each_entry(unit, &port->unit_list_head, list)
1681 zfcp_erp_unit_access_changed(unit, id, ref);
1759 zfcp_erp_unit_access_changed(unit, id, ref);
1760 return; 1682 return;
1761 } 1683 }
1762 1684
@@ -1779,10 +1701,7 @@ void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
1779 return; 1701 return;
1780 1702
1781 read_lock_irqsave(&zfcp_data.config_lock, flags); 1703 read_lock_irqsave(&zfcp_data.config_lock, flags);
1782 if (adapter->nameserver_port)
1783 zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
1784 list_for_each_entry(port, &adapter->port_list_head, list) 1704 list_for_each_entry(port, &adapter->port_list_head, list)
1785 if (port != adapter->nameserver_port)
1705 zfcp_erp_port_access_changed(port, id, ref);
1786 zfcp_erp_port_access_changed(port, id, ref);
1787 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 1706 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1788} 1707}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index edfdb21591f3..b5adeda93e1d 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -12,16 +12,14 @@
12#include "zfcp_def.h" 12#include "zfcp_def.h"
13 13
14/* zfcp_aux.c */ 14/* zfcp_aux.c */
15 extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *,
15 extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
16 fcp_lun_t);
16 extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
17extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
18 wwn_t);
19extern int zfcp_adapter_enqueue(struct ccw_device *); 17extern int zfcp_adapter_enqueue(struct ccw_device *);
20extern void zfcp_adapter_dequeue(struct zfcp_adapter *); 18extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
21 extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32,
19 extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
22 u32); 20 u32);
23extern void zfcp_port_dequeue(struct zfcp_port *); 21extern void zfcp_port_dequeue(struct zfcp_port *);
24 extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
22 extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
25extern void zfcp_unit_dequeue(struct zfcp_unit *); 23extern void zfcp_unit_dequeue(struct zfcp_unit *);
26extern int zfcp_reqlist_isempty(struct zfcp_adapter *); 24extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
27extern void zfcp_sg_free_table(struct scatterlist *, int); 25extern void zfcp_sg_free_table(struct scatterlist *, int);
@@ -29,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
29 27
30/* zfcp_ccw.c */ 28/* zfcp_ccw.c */
31extern int zfcp_ccw_register(void); 29extern int zfcp_ccw_register(void);
30extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
32 31
33/* zfcp_cfdc.c */ 32/* zfcp_cfdc.c */
34extern struct miscdevice zfcp_cfdc_misc; 33extern struct miscdevice zfcp_cfdc_misc;
@@ -50,6 +49,8 @@ extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
50 struct fsf_status_read_buffer *); 49 struct fsf_status_read_buffer *);
51extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, 50extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
52 int); 51 int);
52extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
53 struct zfcp_fsf_req *);
53extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); 54extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
54extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); 55extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
55extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); 56extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
@@ -91,17 +92,21 @@ extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
91extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *); 92extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
92extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *); 93extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
93extern void zfcp_erp_timeout_handler(unsigned long); 94extern void zfcp_erp_timeout_handler(unsigned long);
95extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
94 96
95/* zfcp_fc.c */ 97/* zfcp_fc.c */
96extern int zfcp_scan_ports(struct zfcp_adapter *); 98extern int zfcp_scan_ports(struct zfcp_adapter *);
97extern void _zfcp_scan_ports_later(struct work_struct *); 99extern void _zfcp_scan_ports_later(struct work_struct *);
98extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 100extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
99extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *); 101extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
100extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 102extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
101extern void zfcp_test_link(struct zfcp_port *); 103extern void zfcp_test_link(struct zfcp_port *);
104extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
102 105
103/* zfcp_fsf.c */ 106/* zfcp_fsf.c */
104extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 107extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
108extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *);
109extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *);
105extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 110extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
106extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 111extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
107extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 112extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -135,10 +140,8 @@ extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
135extern int zfcp_qdio_allocate(struct zfcp_adapter *); 140extern int zfcp_qdio_allocate(struct zfcp_adapter *);
136extern void zfcp_qdio_free(struct zfcp_adapter *); 141extern void zfcp_qdio_free(struct zfcp_adapter *);
137extern int zfcp_qdio_send(struct zfcp_fsf_req *); 142extern int zfcp_qdio_send(struct zfcp_fsf_req *);
138extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req( 143extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
139 struct zfcp_fsf_req *); 144extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
140extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
141 struct zfcp_fsf_req *);
142extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long, 145extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
143 struct scatterlist *, int); 146 struct scatterlist *, int);
144extern int zfcp_qdio_open(struct zfcp_adapter *); 147extern int zfcp_qdio_open(struct zfcp_adapter *);
@@ -148,14 +151,12 @@ extern void zfcp_qdio_close(struct zfcp_adapter *);
148extern struct zfcp_data zfcp_data; 151extern struct zfcp_data zfcp_data;
149extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); 152extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
150extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 153extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
151extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
152extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 154extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
153extern struct fc_function_template zfcp_transport_functions; 155extern struct fc_function_template zfcp_transport_functions;
154 156
155/* zfcp_sysfs.c */ 157/* zfcp_sysfs.c */
156extern struct attribute_group zfcp_sysfs_unit_attrs; 158extern struct attribute_group zfcp_sysfs_unit_attrs;
157extern struct attribute_group zfcp_sysfs_adapter_attrs; 159extern struct attribute_group zfcp_sysfs_adapter_attrs;
158extern struct attribute_group zfcp_sysfs_ns_port_attrs;
159extern struct attribute_group zfcp_sysfs_port_attrs; 160extern struct attribute_group zfcp_sysfs_port_attrs;
160extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 161extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
161extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 162extern struct device_attribute *zfcp_sysfs_shost_attrs[];
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index e984469bb98b..1a7c80a77ff5 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -39,16 +39,82 @@ struct zfcp_gpn_ft {
39 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS]; 39 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
40}; 40};
41 41
42static struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *adapter, 42struct zfcp_fc_ns_handler_data {
43 u32 d_id) 43 struct completion done;
44 void (*handler)(unsigned long);
45 unsigned long handler_data;
46};
47
48static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
44{ 49{
45 struct zfcp_port *port; 50 if (mutex_lock_interruptible(&wka_port->mutex))
51 return -ERESTARTSYS;
46 52
47 list_for_each_entry(port, &adapter->port_list_head, list) 53 if (wka_port->status != ZFCP_WKA_PORT_ONLINE) {
48 if ((port->d_id == d_id) && 54 wka_port->status = ZFCP_WKA_PORT_OPENING;
49 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) 55 if (zfcp_fsf_open_wka_port(wka_port))
50 return port; 56 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
51 return NULL; 57 }
58
59 mutex_unlock(&wka_port->mutex);
60
61 wait_event_timeout(
62 wka_port->completion_wq,
63 wka_port->status == ZFCP_WKA_PORT_ONLINE ||
64 wka_port->status == ZFCP_WKA_PORT_OFFLINE,
65 HZ >> 1);
66
67 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
68 atomic_inc(&wka_port->refcount);
69 return 0;
70 }
71 return -EIO;
72}
73
74static void zfcp_wka_port_offline(struct work_struct *work)
75{
76 struct delayed_work *dw = container_of(work, struct delayed_work, work);
77 struct zfcp_wka_port *wka_port =
78 container_of(dw, struct zfcp_wka_port, work);
79
80 wait_event(wka_port->completion_wq,
81 atomic_read(&wka_port->refcount) == 0);
82
83 mutex_lock(&wka_port->mutex);
84 if ((atomic_read(&wka_port->refcount) != 0) ||
85 (wka_port->status != ZFCP_WKA_PORT_ONLINE))
86 goto out;
87
88 wka_port->status = ZFCP_WKA_PORT_CLOSING;
89 if (zfcp_fsf_close_wka_port(wka_port)) {
90 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
91 wake_up(&wka_port->completion_wq);
92 }
93out:
94 mutex_unlock(&wka_port->mutex);
95}
96
97static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
98{
99 if (atomic_dec_return(&wka_port->refcount) != 0)
100 return;
101	/* wait 10 milliseconds, other reqs might pop in */
102 schedule_delayed_work(&wka_port->work, HZ / 100);
103}
104
105void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
106{
107 struct zfcp_wka_port *wka_port = &adapter->nsp;
108
109 init_waitqueue_head(&wka_port->completion_wq);
110
111 wka_port->adapter = adapter;
112 wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE;
113
114 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
115 atomic_set(&wka_port->refcount, 0);
116 mutex_init(&wka_port->mutex);
117 INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
52} 118}
53 119
54static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, 120static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
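[The hunk above replaces the old nameserver port object with a refcounted well-known-address (WKA) port that is opened on first use and closed shortly after its last user drops it. The following is a minimal, self-contained sketch of that open-on-demand/deferred-close pattern; the names and the wka_open()/wka_close() placeholders are illustrative stand-ins, not the zfcp FSF interface, whose response handlers are assumed to update ->status and wake ->completion_wq.]

#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

enum wka_status { WKA_OFFLINE, WKA_OPENING, WKA_ONLINE, WKA_CLOSING };

struct wka_port {
	enum wka_status status;
	atomic_t refcount;
	struct mutex mutex;
	wait_queue_head_t completion_wq;
	struct delayed_work work;
};

/* placeholders for the asynchronous open/close requests */
static int wka_open(struct wka_port *wka) { return 0; }
static int wka_close(struct wka_port *wka) { return 0; }

static int wka_get(struct wka_port *wka)
{
	if (mutex_lock_interruptible(&wka->mutex))
		return -ERESTARTSYS;
	if (wka->status != WKA_ONLINE) {
		wka->status = WKA_OPENING;
		if (wka_open(wka))
			wka->status = WKA_OFFLINE;
	}
	mutex_unlock(&wka->mutex);

	/* give the open request up to half a second to complete */
	wait_event_timeout(wka->completion_wq,
			   wka->status == WKA_ONLINE ||
			   wka->status == WKA_OFFLINE, HZ >> 1);

	if (wka->status == WKA_ONLINE) {
		atomic_inc(&wka->refcount);
		return 0;
	}
	return -EIO;
}

static void wka_offline_work(struct work_struct *work)
{
	struct wka_port *wka = container_of(to_delayed_work(work),
					    struct wka_port, work);

	mutex_lock(&wka->mutex);
	/* a new user may have shown up while the close was pending */
	if (atomic_read(&wka->refcount) == 0 && wka->status == WKA_ONLINE) {
		wka->status = WKA_CLOSING;
		if (wka_close(wka))
			wka->status = WKA_OFFLINE;
	}
	mutex_unlock(&wka->mutex);
}

static void wka_put(struct wka_port *wka)
{
	if (atomic_dec_return(&wka->refcount) != 0)
		return;
	/* defer the close for ~10 ms so back-to-back users can reuse the port */
	schedule_delayed_work(&wka->work, HZ / 100);
}

static void wka_init(struct wka_port *wka)
{
	wka->status = WKA_OFFLINE;
	atomic_set(&wka->refcount, 0);
	mutex_init(&wka->mutex);
	init_waitqueue_head(&wka->completion_wq);
	INIT_DELAYED_WORK(&wka->work, wka_offline_work);
}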
@@ -59,10 +125,8 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
59 125
60 read_lock_irqsave(&zfcp_data.config_lock, flags); 126 read_lock_irqsave(&zfcp_data.config_lock, flags);
61 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 127 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
62 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
63 continue;
64 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */ 128 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
65 if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) 129 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
66 /* Try to connect to unused ports anyway. */ 130 /* Try to connect to unused ports anyway. */
67 zfcp_erp_port_reopen(port, 131 zfcp_erp_port_reopen(port,
68 ZFCP_STATUS_COMMON_ERP_FAILED, 132 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -114,7 +178,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
114 schedule_work(&fsf_req->adapter->scan_work); 178 schedule_work(&fsf_req->adapter->scan_work);
115} 179}
116 180
117static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn) 181static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
118{ 182{
119 struct zfcp_adapter *adapter = req->adapter; 183 struct zfcp_adapter *adapter = req->adapter;
120 struct zfcp_port *port; 184 struct zfcp_port *port;
@@ -169,7 +233,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
169 zfcp_fc_incoming_rscn(fsf_req); 233 zfcp_fc_incoming_rscn(fsf_req);
170} 234}
171 235
172static void zfcp_ns_gid_pn_handler(unsigned long data) 236static void zfcp_fc_ns_handler(unsigned long data)
237{
238 struct zfcp_fc_ns_handler_data *compl_rec =
239 (struct zfcp_fc_ns_handler_data *) data;
240
241 if (compl_rec->handler)
242 compl_rec->handler(compl_rec->handler_data);
243
244 complete(&compl_rec->done);
245}
246
247static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
173{ 248{
174 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data; 249 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
175 struct zfcp_send_ct *ct = &gid_pn->ct; 250 struct zfcp_send_ct *ct = &gid_pn->ct;
@@ -178,43 +253,31 @@ static void zfcp_ns_gid_pn_handler(unsigned long data)
178 struct zfcp_port *port = gid_pn->port; 253 struct zfcp_port *port = gid_pn->port;
179 254
180 if (ct->status) 255 if (ct->status)
181 goto out; 256 return;
182 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) { 257 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
183 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status); 258 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
184 goto out; 259 return;
185 } 260 }
186 /* paranoia */ 261 /* paranoia */
187 if (ct_iu_req->wwpn != port->wwpn) 262 if (ct_iu_req->wwpn != port->wwpn)
188 goto out; 263 return;
189 /* looks like a valid d_id */ 264 /* looks like a valid d_id */
190 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 265 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
191 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status); 266 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
192out:
193 mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
194} 267}
195 268
196/**                                                                          269 static int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
197 * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request 270 struct zfcp_gid_pn_data *gid_pn)
198 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
199 * return: -ENOMEM on error, 0 otherwise
200 */
201int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
202{ 271{
203 int ret;
204 struct zfcp_gid_pn_data *gid_pn;
205 struct zfcp_adapter *adapter = erp_action->adapter; 272 struct zfcp_adapter *adapter = erp_action->adapter;
206 273 struct zfcp_fc_ns_handler_data compl_rec;
207 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC); 274 int ret;
208 if (!gid_pn)
209 return -ENOMEM;
210
211 memset(gid_pn, 0, sizeof(*gid_pn));
212 275
213 /* setup parameters for send generic command */ 276 /* setup parameters for send generic command */
214 gid_pn->port = erp_action->port; 277 gid_pn->port = erp_action->port;
215 gid_pn->ct.port = adapter->nameserver_port; 278 gid_pn->ct.wka_port = &adapter->nsp;
216 gid_pn->ct.handler = zfcp_ns_gid_pn_handler; 279 gid_pn->ct.handler = zfcp_fc_ns_handler;
217 gid_pn->ct.handler_data = (unsigned long) gid_pn; 280 gid_pn->ct.handler_data = (unsigned long) &compl_rec;
218 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; 281 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
219 gid_pn->ct.req = &gid_pn->req; 282 gid_pn->ct.req = &gid_pn->req;
220 gid_pn->ct.resp = &gid_pn->resp; 283 gid_pn->ct.resp = &gid_pn->resp;
@@ -234,10 +297,42 @@ int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
234 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE; 297 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
235 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; 298 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
236 299
300 init_completion(&compl_rec.done);
301 compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
302 compl_rec.handler_data = (unsigned long) gid_pn;
237 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, 303 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
238 erp_action); 304 erp_action);
305 if (!ret)
306 wait_for_completion(&compl_rec.done);
307 return ret;
308}
309
310/**
311 * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
312 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
313 * return: -ENOMEM on error, 0 otherwise
314 */
315int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
316{
317 int ret;
318 struct zfcp_gid_pn_data *gid_pn;
319 struct zfcp_adapter *adapter = erp_action->adapter;
320
321 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
322 if (!gid_pn)
323 return -ENOMEM;
324
325 memset(gid_pn, 0, sizeof(*gid_pn));
326
327 ret = zfcp_wka_port_get(&adapter->nsp);
239 if (ret) 328 if (ret)
240 mempool_free(gid_pn, adapter->pool.data_gid_pn); 329 goto out;
330
331 ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
332
333 zfcp_wka_port_put(&adapter->nsp);
334out:
335 mempool_free(gid_pn, adapter->pool.data_gid_pn);
241 return ret; 336 return ret;
242} 337}
243 338
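[The GID_PN rework above turns the previously fire-and-forget nameserver lookup into a synchronous call: a small completion record is handed to a generic handler, which runs the request-specific evaluation and then wakes the submitter. A minimal sketch of that callback-plus-completion pattern follows; submit_request() is a hypothetical stand-in for zfcp_fsf_send_ct() and is assumed to invoke the callback from its response path.]

#include <linux/completion.h>

struct ns_handler_data {
	struct completion done;
	void (*handler)(unsigned long);
	unsigned long handler_data;
};

/* generic callback: run the per-request evaluation, then wake the waiter */
static void ns_generic_handler(unsigned long data)
{
	struct ns_handler_data *compl_rec = (struct ns_handler_data *) data;

	if (compl_rec->handler)
		compl_rec->handler(compl_rec->handler_data);
	complete(&compl_rec->done);
}

static int send_and_wait(int (*submit_request)(void (*cb)(unsigned long),
						unsigned long cb_data),
			 void (*eval)(unsigned long), unsigned long eval_data)
{
	struct ns_handler_data compl_rec;
	int ret;

	init_completion(&compl_rec.done);
	compl_rec.handler = eval;
	compl_rec.handler_data = eval_data;

	ret = submit_request(ns_generic_handler, (unsigned long) &compl_rec);
	if (!ret)
		wait_for_completion(&compl_rec.done);
	return ret;
}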
@@ -267,14 +362,14 @@ struct zfcp_els_adisc {
267 struct scatterlist req; 362 struct scatterlist req;
268 struct scatterlist resp; 363 struct scatterlist resp;
269 struct zfcp_ls_adisc ls_adisc; 364 struct zfcp_ls_adisc ls_adisc;
270 struct zfcp_ls_adisc_acc ls_adisc_acc; 365 struct zfcp_ls_adisc ls_adisc_acc;
271}; 366};
272 367
273static void zfcp_fc_adisc_handler(unsigned long data) 368static void zfcp_fc_adisc_handler(unsigned long data)
274{ 369{
275 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data; 370 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
276 struct zfcp_port *port = adisc->els.port; 371 struct zfcp_port *port = adisc->els.port;
277 struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc; 372 struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc;
278 373
279 if (adisc->els.status) { 374 if (adisc->els.status) {
280 /* request rejected or timed out */ 375 /* request rejected or timed out */
@@ -307,7 +402,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
307 sg_init_one(adisc->els.req, &adisc->ls_adisc, 402 sg_init_one(adisc->els.req, &adisc->ls_adisc,
308 sizeof(struct zfcp_ls_adisc)); 403 sizeof(struct zfcp_ls_adisc));
309 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, 404 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
310 sizeof(struct zfcp_ls_adisc_acc)); 405 sizeof(struct zfcp_ls_adisc));
311 406
312 adisc->els.req_count = 1; 407 adisc->els.req_count = 1;
313 adisc->els.resp_count = 1; 408 adisc->els.resp_count = 1;
@@ -341,37 +436,13 @@ void zfcp_test_link(struct zfcp_port *port)
341 436
342 zfcp_port_get(port); 437 zfcp_port_get(port);
343 retval = zfcp_fc_adisc(port); 438 retval = zfcp_fc_adisc(port);
344 if (retval == 0 || retval == -EBUSY) 439 if (retval == 0)
345 return; 440 return;
346 441
347 /* send of ADISC was not possible */ 442 /* send of ADISC was not possible */
348 zfcp_port_put(port); 443 zfcp_port_put(port);
349 zfcp_erp_port_forced_reopen(port, 0, 65, NULL); 444 if (retval != -EBUSY)
350} 445 zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
351
352static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
353{
354 int ret;
355
356 if (!adapter->nameserver_port)
357 return -EINTR;
358
359 if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
360 &adapter->nameserver_port->status)) {
361 ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
362 NULL);
363 if (ret)
364 return ret;
365 zfcp_erp_wait(adapter);
366 zfcp_port_put(adapter->nameserver_port);
367 }
368 return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
369 &adapter->nameserver_port->status);
370}
371
372static void zfcp_gpn_ft_handler(unsigned long _done)
373{
374 complete((struct completion *)_done);
375} 446}
376 447
377static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft) 448static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
@@ -415,7 +486,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
415{ 486{
416 struct zfcp_send_ct *ct = &gpn_ft->ct; 487 struct zfcp_send_ct *ct = &gpn_ft->ct;
417 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 488 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
418 struct completion done; 489 struct zfcp_fc_ns_handler_data compl_rec;
419 int ret; 490 int ret;
420 491
421 /* prepare CT IU for GPN_FT */ 492 /* prepare CT IU for GPN_FT */
@@ -432,19 +503,20 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
432 req->fc4_type = ZFCP_CT_SCSI_FCP; 503 req->fc4_type = ZFCP_CT_SCSI_FCP;
433 504
434 /* prepare zfcp_send_ct */ 505 /* prepare zfcp_send_ct */
435 ct->port = adapter->nameserver_port; 506 ct->wka_port = &adapter->nsp;
436 ct->handler = zfcp_gpn_ft_handler; 507 ct->handler = zfcp_fc_ns_handler;
437 ct->handler_data = (unsigned long)&done; 508 ct->handler_data = (unsigned long)&compl_rec;
438 ct->timeout = 10; 509 ct->timeout = 10;
439 ct->req = &gpn_ft->sg_req; 510 ct->req = &gpn_ft->sg_req;
440 ct->resp = gpn_ft->sg_resp; 511 ct->resp = gpn_ft->sg_resp;
441 ct->req_count = 1; 512 ct->req_count = 1;
442 ct->resp_count = ZFCP_GPN_FT_BUFFERS; 513 ct->resp_count = ZFCP_GPN_FT_BUFFERS;
443 514
444 init_completion(&done); 515 init_completion(&compl_rec.done);
516 compl_rec.handler = NULL;
445 ret = zfcp_fsf_send_ct(ct, NULL, NULL); 517 ret = zfcp_fsf_send_ct(ct, NULL, NULL);
446 if (!ret) 518 if (!ret)
447 wait_for_completion(&done); 519 wait_for_completion(&compl_rec.done);
448 return ret; 520 return ret;
449} 521}
450 522
@@ -454,9 +526,8 @@ static void zfcp_validate_port(struct zfcp_port *port)
454 526
455 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); 527 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
456 528
457 if (port == adapter->nameserver_port) 529 if ((port->supported_classes != 0) ||
458 return; 530 !list_empty(&port->unit_list_head)) {
459 if ((port->supported_classes != 0) || (port->units != 0)) {
460 zfcp_port_put(port); 531 zfcp_port_put(port);
461 return; 532 return;
462 } 533 }
@@ -472,10 +543,10 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
472 struct scatterlist *sg = gpn_ft->sg_resp; 543 struct scatterlist *sg = gpn_ft->sg_resp;
473 struct ct_hdr *hdr = sg_virt(sg); 544 struct ct_hdr *hdr = sg_virt(sg);
474 struct gpn_ft_resp_acc *acc = sg_virt(sg); 545 struct gpn_ft_resp_acc *acc = sg_virt(sg);
475 struct zfcp_adapter *adapter = ct->port->adapter; 546 struct zfcp_adapter *adapter = ct->wka_port->adapter;
476 struct zfcp_port *port, *tmp; 547 struct zfcp_port *port, *tmp;
477 u32 d_id; 548 u32 d_id;
478 int ret = 0, x; 549 int ret = 0, x, last = 0;
479 550
480 if (ct->status) 551 if (ct->status)
481 return -EIO; 552 return -EIO;
@@ -492,19 +563,27 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
492 down(&zfcp_data.config_sema); 563 down(&zfcp_data.config_sema);
493 564
494 /* first entry is the header */ 565 /* first entry is the header */
495 for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES; x++) { 566 for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES && !last; x++) {
496 if (x % (ZFCP_GPN_FT_ENTRIES + 1)) 567 if (x % (ZFCP_GPN_FT_ENTRIES + 1))
497 acc++; 568 acc++;
498 else 569 else
499 acc = sg_virt(++sg); 570 acc = sg_virt(++sg);
500 571
572 last = acc->control & 0x80;
501 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 | 573 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
502 acc->port_id[2]; 574 acc->port_id[2];
503 575
576 /* don't attach ports with a well known address */
577 if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA)
578 continue;
504 /* skip the adapter's port and known remote ports */ 579 /* skip the adapter's port and known remote ports */
505 if (acc->wwpn == fc_host_port_name(adapter->scsi_host) || 580 if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
506 zfcp_get_port_by_did(adapter, d_id))
507 continue; 581 continue;
582 port = zfcp_get_port_by_wwpn(adapter, acc->wwpn);
583 if (port) {
584 zfcp_port_get(port);
585 continue;
586 }
508 587
509 port = zfcp_port_enqueue(adapter, acc->wwpn, 588 port = zfcp_port_enqueue(adapter, acc->wwpn,
510 ZFCP_STATUS_PORT_DID_DID | 589 ZFCP_STATUS_PORT_DID_DID |
@@ -513,8 +592,6 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
513 ret = PTR_ERR(port); 592 ret = PTR_ERR(port);
514 else 593 else
515 zfcp_erp_port_reopen(port, 0, 149, NULL); 594 zfcp_erp_port_reopen(port, 0, 149, NULL);
516 if (acc->control & 0x80) /* last entry */
517 break;
518 } 595 }
519 596
520 zfcp_erp_wait(adapter); 597 zfcp_erp_wait(adapter);
@@ -537,13 +614,15 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
537 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT) 614 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
538 return 0; 615 return 0;
539 616
540 ret = zfcp_scan_get_nameserver(adapter); 617 ret = zfcp_wka_port_get(&adapter->nsp);
541 if (ret) 618 if (ret)
542 return ret; 619 return ret;
543 620
544 gpn_ft = zfcp_alloc_sg_env(); 621 gpn_ft = zfcp_alloc_sg_env();
545 if (!gpn_ft) 622 if (!gpn_ft) {
546 return -ENOMEM; 623 ret = -ENOMEM;
624 goto out;
625 }
547 626
548 for (i = 0; i < 3; i++) { 627 for (i = 0; i < 3; i++) {
549 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter); 628 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
@@ -556,7 +635,8 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
556 } 635 }
557 } 636 }
558 zfcp_free_sg_env(gpn_ft); 637 zfcp_free_sg_env(gpn_ft);
559 638out:
639 zfcp_wka_port_put(&adapter->nsp);
560 return ret; 640 return ret;
561} 641}
562 642
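[zfcp_scan_ports() above now brackets the GPN_FT scan with zfcp_wka_port_get()/zfcp_wka_port_put() and routes every failure through a single exit label, so the nameserver reference is dropped exactly once; the request is retried a bounded number of times while evaluation reports an overflowing port table. A condensed sketch of that acquire/retry/unwind shape, with all helpers as trivial illustrative stubs:]

#include <linux/slab.h>
#include <linux/errno.h>

static int ns_get(void) { return 0; }			/* stand-in */
static void ns_put(void) { }				/* stand-in */
static int issue_scan(void *buf) { return 0; }		/* stand-in */
static int eval_scan(void *buf) { return 0; }		/* -E2BIG on overflow */

static int scan_ports(void)
{
	void *buf;
	int i, ret;

	ret = ns_get();				/* take the reference first */
	if (ret)
		return ret;

	buf = kmalloc(4096, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < 3; i++) {		/* bounded retry */
		ret = issue_scan(buf);
		if (!ret) {
			ret = eval_scan(buf);
			if (ret == -E2BIG)
				continue;	/* table overflowed, retry */
			break;
		}
	}
	kfree(buf);
out:
	ns_put();				/* every path drops it once */
	return ret;
}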
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 19c1ca913874..739356a5c123 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -50,19 +50,16 @@ static u32 fsf_qtcb_type[] = {
50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
51}; 51};
52 52
53static const char *zfcp_act_subtable_type[] = {
54 "unknown", "OS", "WWPN", "DID", "LUN"
55};
56
57static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) 53static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
58{ 54{
59 u16 subtable = table >> 16; 55 u16 subtable = table >> 16;
60 u16 rule = table & 0xffff; 56 u16 rule = table & 0xffff;
57 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
61 58
62 if (subtable && subtable < ARRAY_SIZE(zfcp_act_subtable_type)) 59 if (subtable && subtable < ARRAY_SIZE(act_type))
63 dev_warn(&adapter->ccw_device->dev, 60 dev_warn(&adapter->ccw_device->dev,
64 "Access denied in subtable %s, rule %d.\n", 61 "Access denied according to ACT rule type %s, "
65 zfcp_act_subtable_type[subtable], rule); 62 "rule %d\n", act_type[subtable], rule);
66} 63}
67 64
68static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, 65static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
@@ -70,8 +67,8 @@ static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
70{ 67{
71 struct fsf_qtcb_header *header = &req->qtcb->header; 68 struct fsf_qtcb_header *header = &req->qtcb->header;
72 dev_warn(&req->adapter->ccw_device->dev, 69 dev_warn(&req->adapter->ccw_device->dev,
73 "Access denied, cannot send command to port 0x%016Lx.\n", 70 "Access denied to port 0x%016Lx\n",
74 port->wwpn); 71 (unsigned long long)port->wwpn);
75 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 72 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
76 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 73 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
77 zfcp_erp_port_access_denied(port, 55, req); 74 zfcp_erp_port_access_denied(port, 55, req);
@@ -83,8 +80,9 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
83{ 80{
84 struct fsf_qtcb_header *header = &req->qtcb->header; 81 struct fsf_qtcb_header *header = &req->qtcb->header;
85 dev_warn(&req->adapter->ccw_device->dev, 82 dev_warn(&req->adapter->ccw_device->dev,
86 "Access denied for unit 0x%016Lx on port 0x%016Lx.\n", 83 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
87 unit->fcp_lun, unit->port->wwpn); 84 (unsigned long long)unit->fcp_lun,
85 (unsigned long long)unit->port->wwpn);
88 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 86 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
89 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 87 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
90 zfcp_erp_unit_access_denied(unit, 59, req); 88 zfcp_erp_unit_access_denied(unit, 59, req);
@@ -93,9 +91,8 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
93 91
94static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 92static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
95{ 93{
96 dev_err(&req->adapter->ccw_device->dev, 94 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
97 "Required FC class not supported by adapter, " 95 "operational because of an unsupported FC class\n");
98 "shutting down adapter.\n");
99 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req); 96 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
100 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 97 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
101} 98}
@@ -171,42 +168,6 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
171 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 168 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
172} 169}
173 170
174static void zfcp_fsf_bit_error_threshold(struct zfcp_fsf_req *req)
175{
176 struct zfcp_adapter *adapter = req->adapter;
177 struct fsf_status_read_buffer *sr_buf = req->data;
178 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
179
180 dev_warn(&adapter->ccw_device->dev,
181 "Warning: bit error threshold data "
182 "received for the adapter: "
183 "link failures = %i, loss of sync errors = %i, "
184 "loss of signal errors = %i, "
185 "primitive sequence errors = %i, "
186 "invalid transmission word errors = %i, "
187 "CRC errors = %i).\n",
188 err->link_failure_error_count,
189 err->loss_of_sync_error_count,
190 err->loss_of_signal_error_count,
191 err->primitive_sequence_error_count,
192 err->invalid_transmission_word_error_count,
193 err->crc_error_count);
194 dev_warn(&adapter->ccw_device->dev,
195 "Additional bit error threshold data of the adapter: "
196 "primitive sequence event time-outs = %i, "
197 "elastic buffer overrun errors = %i, "
198 "advertised receive buffer-to-buffer credit = %i, "
199 "current receice buffer-to-buffer credit = %i, "
200 "advertised transmit buffer-to-buffer credit = %i, "
201 "current transmit buffer-to-buffer credit = %i).\n",
202 err->primitive_sequence_event_timeout_count,
203 err->elastic_buffer_overrun_error_count,
204 err->advertised_receive_b2b_credit,
205 err->current_receive_b2b_credit,
206 err->advertised_transmit_b2b_credit,
207 err->current_transmit_b2b_credit);
208}
209
210static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id, 171static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
211 struct fsf_link_down_info *link_down) 172 struct fsf_link_down_info *link_down)
212{ 173{
@@ -223,62 +184,66 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
223 switch (link_down->error_code) { 184 switch (link_down->error_code) {
224 case FSF_PSQ_LINK_NO_LIGHT: 185 case FSF_PSQ_LINK_NO_LIGHT:
225 dev_warn(&req->adapter->ccw_device->dev, 186 dev_warn(&req->adapter->ccw_device->dev,
226 "The local link is down: no light detected.\n"); 187 "There is no light signal from the local "
188 "fibre channel cable\n");
227 break; 189 break;
228 case FSF_PSQ_LINK_WRAP_PLUG: 190 case FSF_PSQ_LINK_WRAP_PLUG:
229 dev_warn(&req->adapter->ccw_device->dev, 191 dev_warn(&req->adapter->ccw_device->dev,
230 "The local link is down: wrap plug detected.\n"); 192 "There is a wrap plug instead of a fibre "
193 "channel cable\n");
231 break; 194 break;
232 case FSF_PSQ_LINK_NO_FCP: 195 case FSF_PSQ_LINK_NO_FCP:
233 dev_warn(&req->adapter->ccw_device->dev, 196 dev_warn(&req->adapter->ccw_device->dev,
234 "The local link is down: " 197 "The adjacent fibre channel node does not "
235 "adjacent node on link does not support FCP.\n"); 198 "support FCP\n");
236 break; 199 break;
237 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 200 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
238 dev_warn(&req->adapter->ccw_device->dev, 201 dev_warn(&req->adapter->ccw_device->dev,
239 "The local link is down: " 202 "The FCP device is suspended because of a "
240 "firmware update in progress.\n"); 203 "firmware update\n");
241 break; 204 break;
242 case FSF_PSQ_LINK_INVALID_WWPN: 205 case FSF_PSQ_LINK_INVALID_WWPN:
243 dev_warn(&req->adapter->ccw_device->dev, 206 dev_warn(&req->adapter->ccw_device->dev,
244 "The local link is down: " 207 "The FCP device detected a WWPN that is "
245 "duplicate or invalid WWPN detected.\n"); 208 "duplicate or not valid\n");
246 break; 209 break;
247 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 210 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
248 dev_warn(&req->adapter->ccw_device->dev, 211 dev_warn(&req->adapter->ccw_device->dev,
249 "The local link is down: " 212 "The fibre channel fabric does not support NPIV\n");
250 "no support for NPIV by Fabric.\n");
251 break; 213 break;
252 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 214 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
253 dev_warn(&req->adapter->ccw_device->dev, 215 dev_warn(&req->adapter->ccw_device->dev,
254 "The local link is down: " 216 "The FCP adapter cannot support more NPIV ports\n");
255 "out of resource in FCP daughtercard.\n");
256 break; 217 break;
257 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 218 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
258 dev_warn(&req->adapter->ccw_device->dev, 219 dev_warn(&req->adapter->ccw_device->dev,
259 "The local link is down: " 220 "The adjacent switch cannot support "
260 "out of resource in Fabric.\n"); 221 "more NPIV ports\n");
261 break; 222 break;
262 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 223 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
263 dev_warn(&req->adapter->ccw_device->dev, 224 dev_warn(&req->adapter->ccw_device->dev,
264 "The local link is down: " 225 "The FCP adapter could not log in to the "
265 "unable to login to Fabric.\n"); 226 "fibre channel fabric\n");
266 break; 227 break;
267 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 228 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
268 dev_warn(&req->adapter->ccw_device->dev, 229 dev_warn(&req->adapter->ccw_device->dev,
269 "WWPN assignment file corrupted on adapter.\n"); 230 "The WWPN assignment file on the FCP adapter "
231 "has been damaged\n");
270 break; 232 break;
271 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 233 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
272 dev_warn(&req->adapter->ccw_device->dev, 234 dev_warn(&req->adapter->ccw_device->dev,
273 "Mode table corrupted on adapter.\n"); 235 "The mode table on the FCP adapter "
236 "has been damaged\n");
274 break; 237 break;
275 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 238 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
276 dev_warn(&req->adapter->ccw_device->dev, 239 dev_warn(&req->adapter->ccw_device->dev,
277 "No WWPN for assignment table on adapter.\n"); 240 "All NPIV ports on the FCP adapter have "
241 "been assigned\n");
278 break; 242 break;
279 default: 243 default:
280 dev_warn(&req->adapter->ccw_device->dev, 244 dev_warn(&req->adapter->ccw_device->dev,
281 "The local link to adapter is down.\n"); 245 "The link between the FCP adapter and "
246 "the FC fabric is down\n");
282 } 247 }
283out: 248out:
284 zfcp_erp_adapter_failed(adapter, id, req); 249 zfcp_erp_adapter_failed(adapter, id, req);
@@ -286,27 +251,18 @@ out:
286 251
287static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 252static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
288{ 253{
289 struct zfcp_adapter *adapter = req->adapter;
290 struct fsf_status_read_buffer *sr_buf = req->data; 254 struct fsf_status_read_buffer *sr_buf = req->data;
291 struct fsf_link_down_info *ldi = 255 struct fsf_link_down_info *ldi =
292 (struct fsf_link_down_info *) &sr_buf->payload; 256 (struct fsf_link_down_info *) &sr_buf->payload;
293 257
294 switch (sr_buf->status_subtype) { 258 switch (sr_buf->status_subtype) {
295 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 259 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
296 dev_warn(&adapter->ccw_device->dev,
297 "Physical link is down.\n");
298 zfcp_fsf_link_down_info_eval(req, 38, ldi); 260 zfcp_fsf_link_down_info_eval(req, 38, ldi);
299 break; 261 break;
300 case FSF_STATUS_READ_SUB_FDISC_FAILED: 262 case FSF_STATUS_READ_SUB_FDISC_FAILED:
301 dev_warn(&adapter->ccw_device->dev,
302 "Local link is down "
303 "due to failed FDISC login.\n");
304 zfcp_fsf_link_down_info_eval(req, 39, ldi); 263 zfcp_fsf_link_down_info_eval(req, 39, ldi);
305 break; 264 break;
306 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 265 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
307 dev_warn(&adapter->ccw_device->dev,
308 "Local link is down "
309 "due to firmware update on adapter.\n");
310 zfcp_fsf_link_down_info_eval(req, 40, NULL); 266 zfcp_fsf_link_down_info_eval(req, 40, NULL);
311 }; 267 };
312} 268}
@@ -335,14 +291,17 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
335 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 291 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
336 break; 292 break;
337 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 293 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
338 zfcp_fsf_bit_error_threshold(req); 294 dev_warn(&adapter->ccw_device->dev,
295 "The error threshold for checksum statistics "
296 "has been exceeded\n");
297 zfcp_hba_dbf_event_berr(adapter, req);
339 break; 298 break;
340 case FSF_STATUS_READ_LINK_DOWN: 299 case FSF_STATUS_READ_LINK_DOWN:
341 zfcp_fsf_status_read_link_down(req); 300 zfcp_fsf_status_read_link_down(req);
342 break; 301 break;
343 case FSF_STATUS_READ_LINK_UP: 302 case FSF_STATUS_READ_LINK_UP:
344 dev_info(&adapter->ccw_device->dev, 303 dev_info(&adapter->ccw_device->dev,
345 "Local link was replugged.\n"); 304 "The local link has been restored\n");
346 /* All ports should be marked as ready to run again */ 305 /* All ports should be marked as ready to run again */
347 zfcp_erp_modify_adapter_status(adapter, 30, NULL, 306 zfcp_erp_modify_adapter_status(adapter, 30, NULL,
348 ZFCP_STATUS_COMMON_RUNNING, 307 ZFCP_STATUS_COMMON_RUNNING,
@@ -370,7 +329,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
370 zfcp_fsf_req_free(req); 329 zfcp_fsf_req_free(req);
371 330
372 atomic_inc(&adapter->stat_miss); 331 atomic_inc(&adapter->stat_miss);
373 schedule_work(&adapter->stat_work); 332 queue_work(zfcp_data.work_queue, &adapter->stat_work);
374} 333}
375 334
376static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 335static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
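[The status-read handler above now queues its statistics work on zfcp_data.work_queue instead of handing it to the shared system workqueue via schedule_work(). A rough sketch of giving a driver its own single-threaded workqueue for potentially long-running work; the names are examples, not the zfcp symbols.]

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *drv_wq;

static int drv_wq_init(void)
{
	/* one dedicated kernel thread; keeps slow work off the shared queue */
	drv_wq = create_singlethread_workqueue("drv_wq");
	return drv_wq ? 0 : -ENOMEM;
}

static void drv_wq_exit(void)
{
	destroy_workqueue(drv_wq);
}

/* enqueue on the private workqueue instead of schedule_work(work) */
static void drv_queue(struct work_struct *work)
{
	queue_work(drv_wq, work);
}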
@@ -386,8 +345,8 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
386 break; 345 break;
387 case FSF_SQ_NO_RECOM: 346 case FSF_SQ_NO_RECOM:
388 dev_err(&req->adapter->ccw_device->dev, 347 dev_err(&req->adapter->ccw_device->dev,
389 "No recommendation could be given for a " 348 "The FCP adapter reported a problem "
390 "problem on the adapter.\n"); 349 "that cannot be recovered\n");
391 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req); 350 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
392 break; 351 break;
393 } 352 }
@@ -403,8 +362,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
403 switch (req->qtcb->header.fsf_status) { 362 switch (req->qtcb->header.fsf_status) {
404 case FSF_UNKNOWN_COMMAND: 363 case FSF_UNKNOWN_COMMAND:
405 dev_err(&req->adapter->ccw_device->dev, 364 dev_err(&req->adapter->ccw_device->dev,
406 "Command issued by the device driver (0x%x) is " 365 "The FCP adapter does not recognize the command 0x%x\n",
407 "not known by the adapter.\n",
408 req->qtcb->header.fsf_command); 366 req->qtcb->header.fsf_command);
409 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req); 367 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
410 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 368 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -435,11 +393,9 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
435 return; 393 return;
436 case FSF_PROT_QTCB_VERSION_ERROR: 394 case FSF_PROT_QTCB_VERSION_ERROR:
437 dev_err(&adapter->ccw_device->dev, 395 dev_err(&adapter->ccw_device->dev,
438 "The QTCB version requested by zfcp (0x%x) is not " 396 "QTCB version 0x%x not supported by FCP adapter "
439 "supported by the FCP adapter (lowest supported " 397 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
440 "0x%x, highest supported 0x%x).\n", 398 psq->word[0], psq->word[1]);
441 FSF_QTCB_CURRENT_VERSION, psq->word[0],
442 psq->word[1]);
443 zfcp_erp_adapter_shutdown(adapter, 0, 117, req); 399 zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
444 break; 400 break;
445 case FSF_PROT_ERROR_STATE: 401 case FSF_PROT_ERROR_STATE:
@@ -449,8 +405,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
449 break; 405 break;
450 case FSF_PROT_UNSUPP_QTCB_TYPE: 406 case FSF_PROT_UNSUPP_QTCB_TYPE:
451 dev_err(&adapter->ccw_device->dev, 407 dev_err(&adapter->ccw_device->dev,
452 "Packet header type used by the device driver is " 408 "The QTCB type is not supported by the FCP adapter\n");
453 "incompatible with that used on the adapter.\n");
454 zfcp_erp_adapter_shutdown(adapter, 0, 118, req); 409 zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
455 break; 410 break;
456 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 411 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
@@ -459,7 +414,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
459 break; 414 break;
460 case FSF_PROT_DUPLICATE_REQUEST_ID: 415 case FSF_PROT_DUPLICATE_REQUEST_ID:
461 dev_err(&adapter->ccw_device->dev, 416 dev_err(&adapter->ccw_device->dev,
462 "The request identifier 0x%Lx is ambiguous.\n", 417 "0x%Lx is an ambiguous request identifier\n",
463 (unsigned long long)qtcb->bottom.support.req_handle); 418 (unsigned long long)qtcb->bottom.support.req_handle);
464 zfcp_erp_adapter_shutdown(adapter, 0, 78, req); 419 zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
465 break; 420 break;
@@ -479,9 +434,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
479 break; 434 break;
480 default: 435 default:
481 dev_err(&adapter->ccw_device->dev, 436 dev_err(&adapter->ccw_device->dev,
482 "Transfer protocol status information" 437 "0x%x is not a valid transfer protocol status\n",
483 "provided by the adapter (0x%x) "
484 "is not compatible with the device driver.\n",
485 qtcb->prefix.prot_status); 438 qtcb->prefix.prot_status);
486 zfcp_erp_adapter_shutdown(adapter, 0, 119, req); 439 zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
487 } 440 }
@@ -559,33 +512,17 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
559 adapter->peer_wwpn = bottom->plogi_payload.wwpn; 512 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
560 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 513 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
561 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 514 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
562 if (req->erp_action)
563 dev_info(&adapter->ccw_device->dev,
564 "Point-to-Point fibrechannel "
565 "configuration detected.\n");
566 break; 515 break;
567 case FSF_TOPO_FABRIC: 516 case FSF_TOPO_FABRIC:
568 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 517 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
569 if (req->erp_action)
570 dev_info(&adapter->ccw_device->dev,
571 "Switched fabric fibrechannel "
572 "network detected.\n");
573 break; 518 break;
574 case FSF_TOPO_AL: 519 case FSF_TOPO_AL:
575 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 520 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
576 dev_err(&adapter->ccw_device->dev,
577 "Unsupported arbitrated loop fibrechannel "
578 "topology detected, shutting down "
579 "adapter.\n");
580 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
581 return -EIO;
582 default: 521 default:
583 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
584 dev_err(&adapter->ccw_device->dev, 522 dev_err(&adapter->ccw_device->dev,
585 "The fibrechannel topology reported by the" 523 "Unknown or unsupported arbitrated loop "
586 " adapter is not known by the zfcp driver," 524 "fibre channel topology detected\n");
587 " shutting down adapter.\n"); 525 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
588 zfcp_erp_adapter_shutdown(adapter, 0, 128, req);
589 return -EIO; 526 return -EIO;
590 } 527 }
591 528
@@ -616,11 +553,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
616 553
617 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 554 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
618 dev_err(&adapter->ccw_device->dev, 555 dev_err(&adapter->ccw_device->dev,
619 "Maximum QTCB size (%d bytes) allowed by " 556 "FCP adapter maximum QTCB size (%d bytes) "
620 "the adapter is lower than the minimum " 557 "is too small\n",
621 "required by the driver (%ld bytes).\n", 558 bottom->max_qtcb_size);
622 bottom->max_qtcb_size,
623 sizeof(struct fsf_qtcb));
624 zfcp_erp_adapter_shutdown(adapter, 0, 129, req); 559 zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
625 return; 560 return;
626 } 561 }
@@ -656,15 +591,15 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
656 591
657 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { 592 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
658 dev_err(&adapter->ccw_device->dev, 593 dev_err(&adapter->ccw_device->dev,
659 "The adapter only supports newer control block " 594 "The FCP adapter only supports newer "
660 "versions, try updated device driver.\n"); 595 "control block versions\n");
661 zfcp_erp_adapter_shutdown(adapter, 0, 125, req); 596 zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
662 return; 597 return;
663 } 598 }
664 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 599 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
665 dev_err(&adapter->ccw_device->dev, 600 dev_err(&adapter->ccw_device->dev,
666 "The adapter only supports older control block " 601 "The FCP adapter only supports older "
667 "versions, consider a microcode upgrade.\n"); 602 "control block versions\n");
668 zfcp_erp_adapter_shutdown(adapter, 0, 126, req); 603 zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
669 } 604 }
670} 605}
@@ -688,7 +623,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
688 623
689static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 624static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
690{ 625{
691 struct zfcp_adapter *adapter = req->adapter;
692 struct fsf_qtcb *qtcb = req->qtcb; 626 struct fsf_qtcb *qtcb = req->qtcb;
693 627
694 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 628 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -697,38 +631,47 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
697 switch (qtcb->header.fsf_status) { 631 switch (qtcb->header.fsf_status) {
698 case FSF_GOOD: 632 case FSF_GOOD:
699 zfcp_fsf_exchange_port_evaluate(req); 633 zfcp_fsf_exchange_port_evaluate(req);
700 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
701 break; 634 break;
702 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 635 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
703 zfcp_fsf_exchange_port_evaluate(req); 636 zfcp_fsf_exchange_port_evaluate(req);
704 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
705 zfcp_fsf_link_down_info_eval(req, 43, 637 zfcp_fsf_link_down_info_eval(req, 43,
706 &qtcb->header.fsf_status_qual.link_down_info); 638 &qtcb->header.fsf_status_qual.link_down_info);
707 break; 639 break;
708 } 640 }
709} 641}
710 642
711static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue) 643static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
712{ 644{
713 spin_lock(&queue->lock); 645 struct zfcp_qdio_queue *req_q = &adapter->req_q;
714 if (atomic_read(&queue->count)) 646
647 spin_lock_bh(&adapter->req_q_lock);
648 if (atomic_read(&req_q->count))
715 return 1; 649 return 1;
716 spin_unlock(&queue->lock); 650 spin_unlock_bh(&adapter->req_q_lock);
717 return 0; 651 return 0;
718} 652}
719 653
654static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
655{
656 unsigned int count = atomic_read(&adapter->req_q.count);
657 if (!count)
658 atomic_inc(&adapter->qdio_outb_full);
659 return count > 0;
660}
661
720static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) 662static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
721{ 663{
722 long ret; 664 long ret;
723 struct zfcp_qdio_queue *req_q = &adapter->req_q;
724 665
725 spin_unlock(&req_q->lock); 666 spin_unlock_bh(&adapter->req_q_lock);
726 ret = wait_event_interruptible_timeout(adapter->request_wq, 667 ret = wait_event_interruptible_timeout(adapter->request_wq,
727 zfcp_fsf_sbal_check(req_q), 5 * HZ); 668 zfcp_fsf_sbal_check(adapter), 5 * HZ);
728 if (ret > 0) 669 if (ret > 0)
729 return 0; 670 return 0;
671 if (!ret)
672 atomic_inc(&adapter->qdio_outb_full);
730 673
731 spin_lock(&req_q->lock); 674 spin_lock_bh(&adapter->req_q_lock);
732 return -EIO; 675 return -EIO;
733} 676}
734 677
@@ -765,7 +708,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
765 u32 fsf_cmd, int req_flags, 708 u32 fsf_cmd, int req_flags,
766 mempool_t *pool) 709 mempool_t *pool)
767{ 710{
768 volatile struct qdio_buffer_element *sbale; 711 struct qdio_buffer_element *sbale;
769 712
770 struct zfcp_fsf_req *req; 713 struct zfcp_fsf_req *req;
771 struct zfcp_qdio_queue *req_q = &adapter->req_q; 714 struct zfcp_qdio_queue *req_q = &adapter->req_q;
@@ -867,17 +810,17 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
867{ 810{
868 struct zfcp_fsf_req *req; 811 struct zfcp_fsf_req *req;
869 struct fsf_status_read_buffer *sr_buf; 812 struct fsf_status_read_buffer *sr_buf;
870 volatile struct qdio_buffer_element *sbale; 813 struct qdio_buffer_element *sbale;
871 int retval = -EIO; 814 int retval = -EIO;
872 815
873 spin_lock(&adapter->req_q.lock); 816 spin_lock_bh(&adapter->req_q_lock);
874 if (zfcp_fsf_req_sbal_get(adapter)) 817 if (zfcp_fsf_req_sbal_get(adapter))
875 goto out; 818 goto out;
876 819
877 req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, 820 req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
878 ZFCP_REQ_NO_QTCB, 821 ZFCP_REQ_NO_QTCB,
879 adapter->pool.fsf_req_status_read); 822 adapter->pool.fsf_req_status_read);
880 if (unlikely(IS_ERR(req))) { 823 if (IS_ERR(req)) {
881 retval = PTR_ERR(req); 824 retval = PTR_ERR(req);
882 goto out; 825 goto out;
883 } 826 }
@@ -910,7 +853,7 @@ failed_buf:
910 zfcp_fsf_req_free(req); 853 zfcp_fsf_req_free(req);
911 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); 854 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
912out: 855out:
913 spin_unlock(&adapter->req_q.lock); 856 spin_unlock_bh(&adapter->req_q_lock);
914 return retval; 857 return retval;
915} 858}
916 859
@@ -980,15 +923,15 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
980 struct zfcp_unit *unit, 923 struct zfcp_unit *unit,
981 int req_flags) 924 int req_flags)
982{ 925{
983 volatile struct qdio_buffer_element *sbale; 926 struct qdio_buffer_element *sbale;
984 struct zfcp_fsf_req *req = NULL; 927 struct zfcp_fsf_req *req = NULL;
985 928
986 spin_lock(&adapter->req_q.lock); 929 spin_lock(&adapter->req_q_lock);
987 if (!atomic_read(&adapter->req_q.count)) 930 if (!zfcp_fsf_sbal_available(adapter))
988 goto out; 931 goto out;
989 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, 932 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
990 req_flags, adapter->pool.fsf_req_abort); 933 req_flags, adapter->pool.fsf_req_abort);
991 if (unlikely(IS_ERR(req))) 934 if (IS_ERR(req))
992 goto out; 935 goto out;
993 936
994 if (unlikely(!(atomic_read(&unit->status) & 937 if (unlikely(!(atomic_read(&unit->status) &
@@ -1013,7 +956,7 @@ out_error_free:
1013 zfcp_fsf_req_free(req); 956 zfcp_fsf_req_free(req);
1014 req = NULL; 957 req = NULL;
1015out: 958out:
1016 spin_unlock(&adapter->req_q.lock); 959 spin_unlock(&adapter->req_q_lock);
1017 return req; 960 return req;
1018} 961}
1019 962
@@ -1021,7 +964,6 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1021{ 964{
1022 struct zfcp_adapter *adapter = req->adapter; 965 struct zfcp_adapter *adapter = req->adapter;
1023 struct zfcp_send_ct *send_ct = req->data; 966 struct zfcp_send_ct *send_ct = req->data;
1024 struct zfcp_port *port = send_ct->port;
1025 struct fsf_qtcb_header *header = &req->qtcb->header; 967 struct fsf_qtcb_header *header = &req->qtcb->header;
1026 968
1027 send_ct->status = -EINVAL; 969 send_ct->status = -EINVAL;
@@ -1040,17 +982,14 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1040 case FSF_ADAPTER_STATUS_AVAILABLE: 982 case FSF_ADAPTER_STATUS_AVAILABLE:
1041 switch (header->fsf_status_qual.word[0]){ 983 switch (header->fsf_status_qual.word[0]){
1042 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 984 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1043 zfcp_test_link(port);
1044 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 985 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1045 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 986 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1046 break; 987 break;
1047 } 988 }
1048 break; 989 break;
1049 case FSF_ACCESS_DENIED: 990 case FSF_ACCESS_DENIED:
1050 zfcp_fsf_access_denied_port(req, port);
1051 break; 991 break;
1052 case FSF_PORT_BOXED: 992 case FSF_PORT_BOXED:
1053 zfcp_erp_port_boxed(port, 49, req);
1054 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 993 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1055 ZFCP_STATUS_FSFREQ_RETRY; 994 ZFCP_STATUS_FSFREQ_RETRY;
1056 break; 995 break;
@@ -1101,18 +1040,18 @@ static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
1101int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, 1040int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1102 struct zfcp_erp_action *erp_action) 1041 struct zfcp_erp_action *erp_action)
1103{ 1042{
1104 struct zfcp_port *port = ct->port; 1043 struct zfcp_wka_port *wka_port = ct->wka_port;
1105 struct zfcp_adapter *adapter = port->adapter; 1044 struct zfcp_adapter *adapter = wka_port->adapter;
1106 struct zfcp_fsf_req *req; 1045 struct zfcp_fsf_req *req;
1107 int ret = -EIO; 1046 int ret = -EIO;
1108 1047
1109 spin_lock(&adapter->req_q.lock); 1048 spin_lock_bh(&adapter->req_q_lock);
1110 if (zfcp_fsf_req_sbal_get(adapter)) 1049 if (zfcp_fsf_req_sbal_get(adapter))
1111 goto out; 1050 goto out;
1112 1051
1113 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, 1052 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
1114 ZFCP_REQ_AUTO_CLEANUP, pool); 1053 ZFCP_REQ_AUTO_CLEANUP, pool);
1115 if (unlikely(IS_ERR(req))) { 1054 if (IS_ERR(req)) {
1116 ret = PTR_ERR(req); 1055 ret = PTR_ERR(req);
1117 goto out; 1056 goto out;
1118 } 1057 }
@@ -1123,7 +1062,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1123 goto failed_send; 1062 goto failed_send;
1124 1063
1125 req->handler = zfcp_fsf_send_ct_handler; 1064 req->handler = zfcp_fsf_send_ct_handler;
1126 req->qtcb->header.port_handle = port->handle; 1065 req->qtcb->header.port_handle = wka_port->handle;
1127 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1066 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1128 req->qtcb->bottom.support.timeout = ct->timeout; 1067 req->qtcb->bottom.support.timeout = ct->timeout;
1129 req->data = ct; 1068 req->data = ct;
@@ -1148,7 +1087,7 @@ failed_send:
1148 if (erp_action) 1087 if (erp_action)
1149 erp_action->fsf_req = NULL; 1088 erp_action->fsf_req = NULL;
1150out: 1089out:
1151 spin_unlock(&adapter->req_q.lock); 1090 spin_unlock_bh(&adapter->req_q_lock);
1152 return ret; 1091 return ret;
1153} 1092}
1154 1093
@@ -1218,18 +1157,18 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1218 ZFCP_STATUS_COMMON_UNBLOCKED))) 1157 ZFCP_STATUS_COMMON_UNBLOCKED)))
1219 return -EBUSY; 1158 return -EBUSY;
1220 1159
1221 spin_lock(&adapter->req_q.lock); 1160 spin_lock(&adapter->req_q_lock);
1222 if (!atomic_read(&adapter->req_q.count)) 1161 if (!zfcp_fsf_sbal_available(adapter))
1223 goto out; 1162 goto out;
1224 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, 1163 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
1225 ZFCP_REQ_AUTO_CLEANUP, NULL); 1164 ZFCP_REQ_AUTO_CLEANUP, NULL);
1226 if (unlikely(IS_ERR(req))) { 1165 if (IS_ERR(req)) {
1227 ret = PTR_ERR(req); 1166 ret = PTR_ERR(req);
1228 goto out; 1167 goto out;
1229 } 1168 }
1230 1169
1231 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 1170 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2);
1232 FSF_MAX_SBALS_PER_ELS_REQ); 1171
1233 if (ret) 1172 if (ret)
1234 goto failed_send; 1173 goto failed_send;
1235 1174
@@ -1252,25 +1191,25 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1252failed_send: 1191failed_send:
1253 zfcp_fsf_req_free(req); 1192 zfcp_fsf_req_free(req);
1254out: 1193out:
1255 spin_unlock(&adapter->req_q.lock); 1194 spin_unlock(&adapter->req_q_lock);
1256 return ret; 1195 return ret;
1257} 1196}
1258 1197
1259int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1198int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1260{ 1199{
1261 volatile struct qdio_buffer_element *sbale; 1200 struct qdio_buffer_element *sbale;
1262 struct zfcp_fsf_req *req; 1201 struct zfcp_fsf_req *req;
1263 struct zfcp_adapter *adapter = erp_action->adapter; 1202 struct zfcp_adapter *adapter = erp_action->adapter;
1264 int retval = -EIO; 1203 int retval = -EIO;
1265 1204
1266 spin_lock(&adapter->req_q.lock); 1205 spin_lock_bh(&adapter->req_q_lock);
1267 if (!atomic_read(&adapter->req_q.count)) 1206 if (!zfcp_fsf_sbal_available(adapter))
1268 goto out; 1207 goto out;
1269 req = zfcp_fsf_req_create(adapter, 1208 req = zfcp_fsf_req_create(adapter,
1270 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1209 FSF_QTCB_EXCHANGE_CONFIG_DATA,
1271 ZFCP_REQ_AUTO_CLEANUP, 1210 ZFCP_REQ_AUTO_CLEANUP,
1272 adapter->pool.fsf_req_erp); 1211 adapter->pool.fsf_req_erp);
1273 if (unlikely(IS_ERR(req))) { 1212 if (IS_ERR(req)) {
1274 retval = PTR_ERR(req); 1213 retval = PTR_ERR(req);
1275 goto out; 1214 goto out;
1276 } 1215 }
@@ -1295,24 +1234,24 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1295 erp_action->fsf_req = NULL; 1234 erp_action->fsf_req = NULL;
1296 } 1235 }
1297out: 1236out:
1298 spin_unlock(&adapter->req_q.lock); 1237 spin_unlock_bh(&adapter->req_q_lock);
1299 return retval; 1238 return retval;
1300} 1239}
1301 1240
1302int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, 1241int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1303 struct fsf_qtcb_bottom_config *data) 1242 struct fsf_qtcb_bottom_config *data)
1304{ 1243{
1305 volatile struct qdio_buffer_element *sbale; 1244 struct qdio_buffer_element *sbale;
1306 struct zfcp_fsf_req *req = NULL; 1245 struct zfcp_fsf_req *req = NULL;
1307 int retval = -EIO; 1246 int retval = -EIO;
1308 1247
1309 spin_lock(&adapter->req_q.lock); 1248 spin_lock_bh(&adapter->req_q_lock);
1310 if (zfcp_fsf_req_sbal_get(adapter)) 1249 if (zfcp_fsf_req_sbal_get(adapter))
1311 goto out; 1250 goto out;
1312 1251
1313 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1252 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1314 0, NULL); 1253 0, NULL);
1315 if (unlikely(IS_ERR(req))) { 1254 if (IS_ERR(req)) {
1316 retval = PTR_ERR(req); 1255 retval = PTR_ERR(req);
1317 goto out; 1256 goto out;
1318 } 1257 }
@@ -1334,7 +1273,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1334 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1273 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1335 retval = zfcp_fsf_req_send(req); 1274 retval = zfcp_fsf_req_send(req);
1336out: 1275out:
1337 spin_unlock(&adapter->req_q.lock); 1276 spin_unlock_bh(&adapter->req_q_lock);
1338 if (!retval) 1277 if (!retval)
1339 wait_event(req->completion_wq, 1278 wait_event(req->completion_wq,
1340 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 1279 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -1351,7 +1290,7 @@ out:
1351 */ 1290 */
1352int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) 1291int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1353{ 1292{
1354 volatile struct qdio_buffer_element *sbale; 1293 struct qdio_buffer_element *sbale;
1355 struct zfcp_fsf_req *req; 1294 struct zfcp_fsf_req *req;
1356 struct zfcp_adapter *adapter = erp_action->adapter; 1295 struct zfcp_adapter *adapter = erp_action->adapter;
1357 int retval = -EIO; 1296 int retval = -EIO;
@@ -1359,13 +1298,13 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1359 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1298 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1360 return -EOPNOTSUPP; 1299 return -EOPNOTSUPP;
1361 1300
1362 spin_lock(&adapter->req_q.lock); 1301 spin_lock_bh(&adapter->req_q_lock);
1363 if (!atomic_read(&adapter->req_q.count)) 1302 if (!zfcp_fsf_sbal_available(adapter))
1364 goto out; 1303 goto out;
1365 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 1304 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
1366 ZFCP_REQ_AUTO_CLEANUP, 1305 ZFCP_REQ_AUTO_CLEANUP,
1367 adapter->pool.fsf_req_erp); 1306 adapter->pool.fsf_req_erp);
1368 if (unlikely(IS_ERR(req))) { 1307 if (IS_ERR(req)) {
1369 retval = PTR_ERR(req); 1308 retval = PTR_ERR(req);
1370 goto out; 1309 goto out;
1371 } 1310 }
@@ -1385,7 +1324,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1385 erp_action->fsf_req = NULL; 1324 erp_action->fsf_req = NULL;
1386 } 1325 }
1387out: 1326out:
1388 spin_unlock(&adapter->req_q.lock); 1327 spin_unlock_bh(&adapter->req_q_lock);
1389 return retval; 1328 return retval;
1390} 1329}
1391 1330
@@ -1398,20 +1337,20 @@ out:
1398int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, 1337int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1399 struct fsf_qtcb_bottom_port *data) 1338 struct fsf_qtcb_bottom_port *data)
1400{ 1339{
1401 volatile struct qdio_buffer_element *sbale; 1340 struct qdio_buffer_element *sbale;
1402 struct zfcp_fsf_req *req = NULL; 1341 struct zfcp_fsf_req *req = NULL;
1403 int retval = -EIO; 1342 int retval = -EIO;
1404 1343
1405 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1344 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1406 return -EOPNOTSUPP; 1345 return -EOPNOTSUPP;
1407 1346
1408 spin_lock(&adapter->req_q.lock); 1347 spin_lock_bh(&adapter->req_q_lock);
1409 if (!atomic_read(&adapter->req_q.count)) 1348 if (!zfcp_fsf_sbal_available(adapter))
1410 goto out; 1349 goto out;
1411 1350
1412 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, 1351 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
1413 NULL); 1352 NULL);
1414 if (unlikely(IS_ERR(req))) { 1353 if (IS_ERR(req)) {
1415 retval = PTR_ERR(req); 1354 retval = PTR_ERR(req);
1416 goto out; 1355 goto out;
1417 } 1356 }
@@ -1427,7 +1366,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1427 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1366 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1428 retval = zfcp_fsf_req_send(req); 1367 retval = zfcp_fsf_req_send(req);
1429out: 1368out:
1430 spin_unlock(&adapter->req_q.lock); 1369 spin_unlock_bh(&adapter->req_q_lock);
1431 if (!retval) 1370 if (!retval)
1432 wait_event(req->completion_wq, 1371 wait_event(req->completion_wq,
1433 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 1372 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -1443,7 +1382,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1443 struct fsf_plogi *plogi; 1382 struct fsf_plogi *plogi;
1444 1383
1445 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1384 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1446 goto skip_fsfstatus; 1385 return;
1447 1386
1448 switch (header->fsf_status) { 1387 switch (header->fsf_status) {
1449 case FSF_PORT_ALREADY_OPEN: 1388 case FSF_PORT_ALREADY_OPEN:
@@ -1453,9 +1392,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1453 break; 1392 break;
1454 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1393 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1455 dev_warn(&req->adapter->ccw_device->dev, 1394 dev_warn(&req->adapter->ccw_device->dev,
1456 "The adapter is out of resources. The remote port " 1395 "Not enough FCP adapter resources to open "
1457 "0x%016Lx could not be opened, disabling it.\n", 1396 "remote port 0x%016Lx\n",
1458 port->wwpn); 1397 (unsigned long long)port->wwpn);
1459 zfcp_erp_port_failed(port, 31, req); 1398 zfcp_erp_port_failed(port, 31, req);
1460 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1399 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1461 break; 1400 break;
@@ -1467,8 +1406,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1467 break; 1406 break;
1468 case FSF_SQ_NO_RETRY_POSSIBLE: 1407 case FSF_SQ_NO_RETRY_POSSIBLE:
1469 dev_warn(&req->adapter->ccw_device->dev, 1408 dev_warn(&req->adapter->ccw_device->dev,
1470 "The remote port 0x%016Lx could not be " 1409 "Remote port 0x%016Lx could not be opened\n",
1471 "opened. Disabling it.\n", port->wwpn); 1410 (unsigned long long)port->wwpn);
1472 zfcp_erp_port_failed(port, 32, req); 1411 zfcp_erp_port_failed(port, 32, req);
1473 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1412 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1474 break; 1413 break;
@@ -1496,9 +1435,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1496 * another GID_PN straight after a port has been opened. 1435 * another GID_PN straight after a port has been opened.
1497 * Alternatively, an ADISC/PDISC ELS should suffice as well. 1436
1498 */ 1437 */
1499 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN)
1500 break;
1501
1502 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; 1438 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1503 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) { 1439 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
1504 if (plogi->serv_param.wwpn != port->wwpn) 1440 if (plogi->serv_param.wwpn != port->wwpn)
@@ -1514,9 +1450,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1514 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1450 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1515 break; 1451 break;
1516 } 1452 }
1517
1518skip_fsfstatus:
1519 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
1520} 1453}
1521 1454
1522/** 1455/**
@@ -1526,12 +1459,12 @@ skip_fsfstatus:
1526 */ 1459 */
1527int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 1460int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1528{ 1461{
1529 volatile struct qdio_buffer_element *sbale; 1462 struct qdio_buffer_element *sbale;
1530 struct zfcp_adapter *adapter = erp_action->adapter; 1463 struct zfcp_adapter *adapter = erp_action->adapter;
1531 struct zfcp_fsf_req *req; 1464 struct zfcp_fsf_req *req;
1532 int retval = -EIO; 1465 int retval = -EIO;
1533 1466
1534 spin_lock(&adapter->req_q.lock); 1467 spin_lock_bh(&adapter->req_q_lock);
1535 if (zfcp_fsf_req_sbal_get(adapter)) 1468 if (zfcp_fsf_req_sbal_get(adapter))
1536 goto out; 1469 goto out;
1537 1470
@@ -1539,7 +1472,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1539 FSF_QTCB_OPEN_PORT_WITH_DID, 1472 FSF_QTCB_OPEN_PORT_WITH_DID,
1540 ZFCP_REQ_AUTO_CLEANUP, 1473 ZFCP_REQ_AUTO_CLEANUP,
1541 adapter->pool.fsf_req_erp); 1474 adapter->pool.fsf_req_erp);
1542 if (unlikely(IS_ERR(req))) { 1475 if (IS_ERR(req)) {
1543 retval = PTR_ERR(req); 1476 retval = PTR_ERR(req);
1544 goto out; 1477 goto out;
1545 } 1478 }
@@ -1553,7 +1486,6 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1553 req->data = erp_action->port; 1486 req->data = erp_action->port;
1554 req->erp_action = erp_action; 1487 req->erp_action = erp_action;
1555 erp_action->fsf_req = req; 1488 erp_action->fsf_req = req;
1556 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
1557 1489
1558 zfcp_fsf_start_erp_timer(req); 1490 zfcp_fsf_start_erp_timer(req);
1559 retval = zfcp_fsf_req_send(req); 1491 retval = zfcp_fsf_req_send(req);
@@ -1562,7 +1494,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1562 erp_action->fsf_req = NULL; 1494 erp_action->fsf_req = NULL;
1563 } 1495 }
1564out: 1496out:
1565 spin_unlock(&adapter->req_q.lock); 1497 spin_unlock_bh(&adapter->req_q_lock);
1566 return retval; 1498 return retval;
1567} 1499}
1568 1500
@@ -1571,7 +1503,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1571 struct zfcp_port *port = req->data; 1503 struct zfcp_port *port = req->data;
1572 1504
1573 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1505 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1574 goto skip_fsfstatus; 1506 return;
1575 1507
1576 switch (req->qtcb->header.fsf_status) { 1508 switch (req->qtcb->header.fsf_status) {
1577 case FSF_PORT_HANDLE_NOT_VALID: 1509 case FSF_PORT_HANDLE_NOT_VALID:
@@ -1586,9 +1518,6 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1586 ZFCP_CLEAR); 1518 ZFCP_CLEAR);
1587 break; 1519 break;
1588 } 1520 }
1589
1590skip_fsfstatus:
1591 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
1592} 1521}
1593 1522
1594/** 1523/**
@@ -1598,19 +1527,19 @@ skip_fsfstatus:
1598 */ 1527 */
1599int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 1528int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1600{ 1529{
1601 volatile struct qdio_buffer_element *sbale; 1530 struct qdio_buffer_element *sbale;
1602 struct zfcp_adapter *adapter = erp_action->adapter; 1531 struct zfcp_adapter *adapter = erp_action->adapter;
1603 struct zfcp_fsf_req *req; 1532 struct zfcp_fsf_req *req;
1604 int retval = -EIO; 1533 int retval = -EIO;
1605 1534
1606 spin_lock(&adapter->req_q.lock); 1535 spin_lock_bh(&adapter->req_q_lock);
1607 if (zfcp_fsf_req_sbal_get(adapter)) 1536 if (zfcp_fsf_req_sbal_get(adapter))
1608 goto out; 1537 goto out;
1609 1538
1610 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, 1539 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1611 ZFCP_REQ_AUTO_CLEANUP, 1540 ZFCP_REQ_AUTO_CLEANUP,
1612 adapter->pool.fsf_req_erp); 1541 adapter->pool.fsf_req_erp);
1613 if (unlikely(IS_ERR(req))) { 1542 if (IS_ERR(req)) {
1614 retval = PTR_ERR(req); 1543 retval = PTR_ERR(req);
1615 goto out; 1544 goto out;
1616 } 1545 }
@@ -1624,7 +1553,6 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1624 req->erp_action = erp_action; 1553 req->erp_action = erp_action;
1625 req->qtcb->header.port_handle = erp_action->port->handle; 1554 req->qtcb->header.port_handle = erp_action->port->handle;
1626 erp_action->fsf_req = req; 1555 erp_action->fsf_req = req;
1627 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
1628 1556
1629 zfcp_fsf_start_erp_timer(req); 1557 zfcp_fsf_start_erp_timer(req);
1630 retval = zfcp_fsf_req_send(req); 1558 retval = zfcp_fsf_req_send(req);
@@ -1633,7 +1561,131 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1633 erp_action->fsf_req = NULL; 1561 erp_action->fsf_req = NULL;
1634 } 1562 }
1635out: 1563out:
1636 spin_unlock(&adapter->req_q.lock); 1564 spin_unlock_bh(&adapter->req_q_lock);
1565 return retval;
1566}
1567
1568static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1569{
1570 struct zfcp_wka_port *wka_port = req->data;
1571 struct fsf_qtcb_header *header = &req->qtcb->header;
1572
1573 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1574 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1575 goto out;
1576 }
1577
1578 switch (header->fsf_status) {
1579 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1580 dev_warn(&req->adapter->ccw_device->dev,
1581 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1582 case FSF_ADAPTER_STATUS_AVAILABLE:
1583 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1584 case FSF_ACCESS_DENIED:
1585 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1586 break;
1587 case FSF_PORT_ALREADY_OPEN:
1588 case FSF_GOOD:
1589 wka_port->handle = header->port_handle;
1590 wka_port->status = ZFCP_WKA_PORT_ONLINE;
1591 }
1592out:
1593 wake_up(&wka_port->completion_wq);
1594}
1595
1596/**
1597 * zfcp_fsf_open_wka_port - create and send open wka-port request
1598 * @wka_port: pointer to struct zfcp_wka_port
1599 * Returns: 0 on success, error otherwise
1600 */
1601int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1602{
1603 struct qdio_buffer_element *sbale;
1604 struct zfcp_adapter *adapter = wka_port->adapter;
1605 struct zfcp_fsf_req *req;
1606 int retval = -EIO;
1607
1608 spin_lock_bh(&adapter->req_q_lock);
1609 if (zfcp_fsf_req_sbal_get(adapter))
1610 goto out;
1611
1612 req = zfcp_fsf_req_create(adapter,
1613 FSF_QTCB_OPEN_PORT_WITH_DID,
1614 ZFCP_REQ_AUTO_CLEANUP,
1615 adapter->pool.fsf_req_erp);
1616 if (unlikely(IS_ERR(req))) {
1617 retval = PTR_ERR(req);
1618 goto out;
1619 }
1620
1621 sbale = zfcp_qdio_sbale_req(req);
1622 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1623 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1624
1625 req->handler = zfcp_fsf_open_wka_port_handler;
1626 req->qtcb->bottom.support.d_id = wka_port->d_id;
1627 req->data = wka_port;
1628
1629 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1630 retval = zfcp_fsf_req_send(req);
1631 if (retval)
1632 zfcp_fsf_req_free(req);
1633out:
1634 spin_unlock_bh(&adapter->req_q_lock);
1635 return retval;
1636}
1637
1638static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1639{
1640 struct zfcp_wka_port *wka_port = req->data;
1641
1642 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1643 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1644 zfcp_erp_adapter_reopen(wka_port->adapter, 0, 84, req);
1645 }
1646
1647 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1648 wake_up(&wka_port->completion_wq);
1649}
1650
1651/**
1652 * zfcp_fsf_close_wka_port - create and send close wka port request
1653 * @erp_action: pointer to struct zfcp_erp_action
1654 * Returns: 0 on success, error otherwise
1655 */
1656int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1657{
1658 struct qdio_buffer_element *sbale;
1659 struct zfcp_adapter *adapter = wka_port->adapter;
1660 struct zfcp_fsf_req *req;
1661 int retval = -EIO;
1662
1663 spin_lock_bh(&adapter->req_q_lock);
1664 if (zfcp_fsf_req_sbal_get(adapter))
1665 goto out;
1666
1667 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1668 ZFCP_REQ_AUTO_CLEANUP,
1669 adapter->pool.fsf_req_erp);
1670 if (unlikely(IS_ERR(req))) {
1671 retval = PTR_ERR(req);
1672 goto out;
1673 }
1674
1675 sbale = zfcp_qdio_sbale_req(req);
1676 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1677 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1678
1679 req->handler = zfcp_fsf_close_wka_port_handler;
1680 req->data = wka_port;
1681 req->qtcb->header.port_handle = wka_port->handle;
1682
1683 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1684 retval = zfcp_fsf_req_send(req);
1685 if (retval)
1686 zfcp_fsf_req_free(req);
1687out:
1688 spin_unlock_bh(&adapter->req_q_lock);
1637 return retval; 1689 return retval;
1638} 1690}
1639 1691
@@ -1695,19 +1747,19 @@ skip_fsfstatus:
1695 */ 1747 */
1696int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 1748int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1697{ 1749{
1698 volatile struct qdio_buffer_element *sbale; 1750 struct qdio_buffer_element *sbale;
1699 struct zfcp_adapter *adapter = erp_action->adapter; 1751 struct zfcp_adapter *adapter = erp_action->adapter;
1700 struct zfcp_fsf_req *req; 1752 struct zfcp_fsf_req *req;
1701 int retval = -EIO; 1753 int retval = -EIO;
1702 1754
1703 spin_lock(&adapter->req_q.lock); 1755 spin_lock_bh(&adapter->req_q_lock);
1704 if (zfcp_fsf_req_sbal_get(adapter)) 1756 if (zfcp_fsf_req_sbal_get(adapter))
1705 goto out; 1757 goto out;
1706 1758
1707 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, 1759 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1708 ZFCP_REQ_AUTO_CLEANUP, 1760 ZFCP_REQ_AUTO_CLEANUP,
1709 adapter->pool.fsf_req_erp); 1761 adapter->pool.fsf_req_erp);
1710 if (unlikely(IS_ERR(req))) { 1762 if (IS_ERR(req)) {
1711 retval = PTR_ERR(req); 1763 retval = PTR_ERR(req);
1712 goto out; 1764 goto out;
1713 } 1765 }
@@ -1731,7 +1783,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1731 erp_action->fsf_req = NULL; 1783 erp_action->fsf_req = NULL;
1732 } 1784 }
1733out: 1785out:
1734 spin_unlock(&adapter->req_q.lock); 1786 spin_unlock_bh(&adapter->req_q_lock);
1735 return retval; 1787 return retval;
1736} 1788}
1737 1789
@@ -1746,7 +1798,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1746 int exclusive, readwrite; 1798 int exclusive, readwrite;
1747 1799
1748 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1800 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1749 goto skip_fsfstatus; 1801 return;
1750 1802
1751 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1803 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1752 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1804 ZFCP_STATUS_COMMON_ACCESS_BOXED |
@@ -1774,14 +1826,12 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1774 case FSF_LUN_SHARING_VIOLATION: 1826 case FSF_LUN_SHARING_VIOLATION:
1775 if (header->fsf_status_qual.word[0]) 1827 if (header->fsf_status_qual.word[0])
1776 dev_warn(&adapter->ccw_device->dev, 1828 dev_warn(&adapter->ccw_device->dev,
1777 "FCP-LUN 0x%Lx at the remote port " 1829 "LUN 0x%Lx on port 0x%Lx is already in "
1778 "with WWPN 0x%Lx " 1830 "use by CSS%d, MIF Image ID %x\n",
1779 "connected to the adapter " 1831 (unsigned long long)unit->fcp_lun,
1780 "is already in use in LPAR%d, CSS%d.\n", 1832 (unsigned long long)unit->port->wwpn,
1781 unit->fcp_lun, 1833 queue_designator->cssid,
1782 unit->port->wwpn, 1834 queue_designator->hla);
1783 queue_designator->hla,
1784 queue_designator->cssid);
1785 else 1835 else
1786 zfcp_act_eval_err(adapter, 1836 zfcp_act_eval_err(adapter,
1787 header->fsf_status_qual.word[2]); 1837 header->fsf_status_qual.word[2]);
@@ -1792,9 +1842,10 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1792 break; 1842 break;
1793 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1843 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1794 dev_warn(&adapter->ccw_device->dev, 1844 dev_warn(&adapter->ccw_device->dev,
1795 "The adapter ran out of resources. There is no " 1845 "No handle is available for LUN "
1796 "handle available for unit 0x%016Lx on port 0x%016Lx.", 1846 "0x%016Lx on port 0x%016Lx\n",
1797 unit->fcp_lun, unit->port->wwpn); 1847 (unsigned long long)unit->fcp_lun,
1848 (unsigned long long)unit->port->wwpn);
1798 zfcp_erp_unit_failed(unit, 34, req); 1849 zfcp_erp_unit_failed(unit, 34, req);
1799 /* fall through */ 1850 /* fall through */
1800 case FSF_INVALID_COMMAND_OPTION: 1851 case FSF_INVALID_COMMAND_OPTION:
@@ -1831,26 +1882,29 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1831 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, 1882 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1832 &unit->status); 1883 &unit->status);
1833 dev_info(&adapter->ccw_device->dev, 1884 dev_info(&adapter->ccw_device->dev,
1834 "Read-only access for unit 0x%016Lx " 1885 "SCSI device at LUN 0x%016Lx on port "
1835 "on port 0x%016Lx.\n", 1886 "0x%016Lx opened read-only\n",
1836 unit->fcp_lun, unit->port->wwpn); 1887 (unsigned long long)unit->fcp_lun,
1888 (unsigned long long)unit->port->wwpn);
1837 } 1889 }
1838 1890
1839 if (exclusive && !readwrite) { 1891 if (exclusive && !readwrite) {
1840 dev_err(&adapter->ccw_device->dev, 1892 dev_err(&adapter->ccw_device->dev,
1841 "Exclusive access of read-only unit " 1893 "Exclusive read-only access not "
1842 "0x%016Lx on port 0x%016Lx not " 1894 "supported (unit 0x%016Lx, "
1843 "supported, disabling unit.\n", 1895 "port 0x%016Lx)\n",
1844 unit->fcp_lun, unit->port->wwpn); 1896 (unsigned long long)unit->fcp_lun,
1897 (unsigned long long)unit->port->wwpn);
1845 zfcp_erp_unit_failed(unit, 35, req); 1898 zfcp_erp_unit_failed(unit, 35, req);
1846 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1899 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1847 zfcp_erp_unit_shutdown(unit, 0, 80, req); 1900 zfcp_erp_unit_shutdown(unit, 0, 80, req);
1848 } else if (!exclusive && readwrite) { 1901 } else if (!exclusive && readwrite) {
1849 dev_err(&adapter->ccw_device->dev, 1902 dev_err(&adapter->ccw_device->dev,
1850 "Shared access of read-write unit " 1903 "Shared read-write access not "
1851 "0x%016Lx on port 0x%016Lx not " 1904 "supported (unit 0x%016Lx, port "
1852 "supported, disabling unit.\n", 1905 "0x%016Lx\n)",
1853 unit->fcp_lun, unit->port->wwpn); 1906 (unsigned long long)unit->fcp_lun,
1907 (unsigned long long)unit->port->wwpn);
1854 zfcp_erp_unit_failed(unit, 36, req); 1908 zfcp_erp_unit_failed(unit, 36, req);
1855 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1909 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1856 zfcp_erp_unit_shutdown(unit, 0, 81, req); 1910 zfcp_erp_unit_shutdown(unit, 0, 81, req);
@@ -1858,9 +1912,6 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1858 } 1912 }
1859 break; 1913 break;
1860 } 1914 }
1861
1862skip_fsfstatus:
1863 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
1864} 1915}
1865 1916
1866/** 1917/**
@@ -1870,19 +1921,19 @@ skip_fsfstatus:
1870 */ 1921 */
1871int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 1922int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1872{ 1923{
1873 volatile struct qdio_buffer_element *sbale; 1924 struct qdio_buffer_element *sbale;
1874 struct zfcp_adapter *adapter = erp_action->adapter; 1925 struct zfcp_adapter *adapter = erp_action->adapter;
1875 struct zfcp_fsf_req *req; 1926 struct zfcp_fsf_req *req;
1876 int retval = -EIO; 1927 int retval = -EIO;
1877 1928
1878 spin_lock(&adapter->req_q.lock); 1929 spin_lock_bh(&adapter->req_q_lock);
1879 if (zfcp_fsf_req_sbal_get(adapter)) 1930 if (zfcp_fsf_req_sbal_get(adapter))
1880 goto out; 1931 goto out;
1881 1932
1882 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, 1933 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
1883 ZFCP_REQ_AUTO_CLEANUP, 1934 ZFCP_REQ_AUTO_CLEANUP,
1884 adapter->pool.fsf_req_erp); 1935 adapter->pool.fsf_req_erp);
1885 if (unlikely(IS_ERR(req))) { 1936 if (IS_ERR(req)) {
1886 retval = PTR_ERR(req); 1937 retval = PTR_ERR(req);
1887 goto out; 1938 goto out;
1888 } 1939 }
@@ -1901,8 +1952,6 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1901 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 1952 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1902 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; 1953 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1903 1954
1904 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
1905
1906 zfcp_fsf_start_erp_timer(req); 1955 zfcp_fsf_start_erp_timer(req);
1907 retval = zfcp_fsf_req_send(req); 1956 retval = zfcp_fsf_req_send(req);
1908 if (retval) { 1957 if (retval) {
@@ -1910,7 +1959,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1910 erp_action->fsf_req = NULL; 1959 erp_action->fsf_req = NULL;
1911 } 1960 }
1912out: 1961out:
1913 spin_unlock(&adapter->req_q.lock); 1962 spin_unlock_bh(&adapter->req_q_lock);
1914 return retval; 1963 return retval;
1915} 1964}
1916 1965
@@ -1919,7 +1968,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1919 struct zfcp_unit *unit = req->data; 1968 struct zfcp_unit *unit = req->data;
1920 1969
1921 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1970 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1922 goto skip_fsfstatus; 1971 return;
1923 1972
1924 switch (req->qtcb->header.fsf_status) { 1973 switch (req->qtcb->header.fsf_status) {
1925 case FSF_PORT_HANDLE_NOT_VALID: 1974 case FSF_PORT_HANDLE_NOT_VALID:
@@ -1949,8 +1998,6 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1949 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1998 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1950 break; 1999 break;
1951 } 2000 }
1952skip_fsfstatus:
1953 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
1954} 2001}
1955 2002
1956/** 2003/**
@@ -1960,18 +2007,18 @@ skip_fsfstatus:
1960 */ 2007 */
1961int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 2008int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1962{ 2009{
1963 volatile struct qdio_buffer_element *sbale; 2010 struct qdio_buffer_element *sbale;
1964 struct zfcp_adapter *adapter = erp_action->adapter; 2011 struct zfcp_adapter *adapter = erp_action->adapter;
1965 struct zfcp_fsf_req *req; 2012 struct zfcp_fsf_req *req;
1966 int retval = -EIO; 2013 int retval = -EIO;
1967 2014
1968 spin_lock(&adapter->req_q.lock); 2015 spin_lock_bh(&adapter->req_q_lock);
1969 if (zfcp_fsf_req_sbal_get(adapter)) 2016 if (zfcp_fsf_req_sbal_get(adapter))
1970 goto out; 2017 goto out;
1971 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, 2018 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
1972 ZFCP_REQ_AUTO_CLEANUP, 2019 ZFCP_REQ_AUTO_CLEANUP,
1973 adapter->pool.fsf_req_erp); 2020 adapter->pool.fsf_req_erp);
1974 if (unlikely(IS_ERR(req))) { 2021 if (IS_ERR(req)) {
1975 retval = PTR_ERR(req); 2022 retval = PTR_ERR(req);
1976 goto out; 2023 goto out;
1977 } 2024 }
@@ -1986,7 +2033,6 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1986 req->data = erp_action->unit; 2033 req->data = erp_action->unit;
1987 req->erp_action = erp_action; 2034 req->erp_action = erp_action;
1988 erp_action->fsf_req = req; 2035 erp_action->fsf_req = req;
1989 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
1990 2036
1991 zfcp_fsf_start_erp_timer(req); 2037 zfcp_fsf_start_erp_timer(req);
1992 retval = zfcp_fsf_req_send(req); 2038 retval = zfcp_fsf_req_send(req);
@@ -1995,7 +2041,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1995 erp_action->fsf_req = NULL; 2041 erp_action->fsf_req = NULL;
1996 } 2042 }
1997out: 2043out:
1998 spin_unlock(&adapter->req_q.lock); 2044 spin_unlock_bh(&adapter->req_q_lock);
1999 return retval; 2045 return retval;
2000} 2046}
2001 2047
@@ -2156,21 +2202,21 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2156 break; 2202 break;
2157 case FSF_DIRECTION_INDICATOR_NOT_VALID: 2203 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2158 dev_err(&req->adapter->ccw_device->dev, 2204 dev_err(&req->adapter->ccw_device->dev,
2159 "Invalid data direction (%d) given for unit " 2205 "Incorrect direction %d, unit 0x%016Lx on port "
2160 "0x%016Lx on port 0x%016Lx, shutting down " 2206 "0x%016Lx closed\n",
2161 "adapter.\n",
2162 req->qtcb->bottom.io.data_direction, 2207 req->qtcb->bottom.io.data_direction,
2163 unit->fcp_lun, unit->port->wwpn); 2208 (unsigned long long)unit->fcp_lun,
2209 (unsigned long long)unit->port->wwpn);
2164 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req); 2210 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
2165 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2211 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2166 break; 2212 break;
2167 case FSF_CMND_LENGTH_NOT_VALID: 2213 case FSF_CMND_LENGTH_NOT_VALID:
2168 dev_err(&req->adapter->ccw_device->dev, 2214 dev_err(&req->adapter->ccw_device->dev,
2169 "An invalid control-data-block length field (%d) " 2215 "Incorrect CDB length %d, unit 0x%016Lx on "
2170 "was found in a command for unit 0x%016Lx on port " 2216 "port 0x%016Lx closed\n",
2171 "0x%016Lx. Shutting down adapter.\n",
2172 req->qtcb->bottom.io.fcp_cmnd_length, 2217 req->qtcb->bottom.io.fcp_cmnd_length,
2173 unit->fcp_lun, unit->port->wwpn); 2218 (unsigned long long)unit->fcp_lun,
2219 (unsigned long long)unit->port->wwpn);
2174 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req); 2220 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
2175 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2221 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2176 break; 2222 break;
@@ -2201,6 +2247,20 @@ skip_fsfstatus:
2201 } 2247 }
2202} 2248}
2203 2249
2250static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2251{
2252 u32 *fcp_dl_ptr;
2253
2254 /*
2255 * fcp_dl_addr = start address of fcp_cmnd structure +
2256 * size of fixed part + size of dynamically sized add_fcp_cdb field
2257 * SEE FCP-2 documentation
2258 */
2259 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2260 (fcp_cmd->add_fcp_cdb_length << 2));
2261 *fcp_dl_ptr = fcp_dl;
2262}
2263
2204/** 2264/**
2205 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2265 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2206 * @adapter: adapter where scsi command is issued 2266 * @adapter: adapter where scsi command is issued
@@ -2223,12 +2283,12 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2223 ZFCP_STATUS_COMMON_UNBLOCKED))) 2283 ZFCP_STATUS_COMMON_UNBLOCKED)))
2224 return -EBUSY; 2284 return -EBUSY;
2225 2285
2226 spin_lock(&adapter->req_q.lock); 2286 spin_lock(&adapter->req_q_lock);
2227 if (!atomic_read(&adapter->req_q.count)) 2287 if (!zfcp_fsf_sbal_available(adapter))
2228 goto out; 2288 goto out;
2229 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2289 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2230 adapter->pool.fsf_req_scsi); 2290 adapter->pool.fsf_req_scsi);
2231 if (unlikely(IS_ERR(req))) { 2291 if (IS_ERR(req)) {
2232 retval = PTR_ERR(req); 2292 retval = PTR_ERR(req);
2233 goto out; 2293 goto out;
2234 } 2294 }
@@ -2286,7 +2346,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2286 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 2346 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
2287 2347
2288 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2348 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2289 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t); 2349 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2290 2350
2291 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype, 2351 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
2292 scsi_sglist(scsi_cmnd), 2352 scsi_sglist(scsi_cmnd),
@@ -2296,10 +2356,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2296 retval = -EIO; 2356 retval = -EIO;
2297 else { 2357 else {
2298 dev_err(&adapter->ccw_device->dev, 2358 dev_err(&adapter->ccw_device->dev,
2299 "SCSI request too large. " 2359 "Oversize data package, unit 0x%016Lx "
2300 "Shutting down unit 0x%016Lx on port " 2360 "on port 0x%016Lx closed\n",
2301 "0x%016Lx.\n", unit->fcp_lun, 2361 (unsigned long long)unit->fcp_lun,
2302 unit->port->wwpn); 2362 (unsigned long long)unit->port->wwpn);
2303 zfcp_erp_unit_shutdown(unit, 0, 131, req); 2363 zfcp_erp_unit_shutdown(unit, 0, 131, req);
2304 retval = -EINVAL; 2364 retval = -EINVAL;
2305 } 2365 }
@@ -2322,7 +2382,7 @@ failed_scsi_cmnd:
2322 zfcp_fsf_req_free(req); 2382 zfcp_fsf_req_free(req);
2323 scsi_cmnd->host_scribble = NULL; 2383 scsi_cmnd->host_scribble = NULL;
2324out: 2384out:
2325 spin_unlock(&adapter->req_q.lock); 2385 spin_unlock(&adapter->req_q_lock);
2326 return retval; 2386 return retval;
2327} 2387}
2328 2388
@@ -2338,7 +2398,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2338 struct zfcp_unit *unit, 2398 struct zfcp_unit *unit,
2339 u8 tm_flags, int req_flags) 2399 u8 tm_flags, int req_flags)
2340{ 2400{
2341 volatile struct qdio_buffer_element *sbale; 2401 struct qdio_buffer_element *sbale;
2342 struct zfcp_fsf_req *req = NULL; 2402 struct zfcp_fsf_req *req = NULL;
2343 struct fcp_cmnd_iu *fcp_cmnd_iu; 2403 struct fcp_cmnd_iu *fcp_cmnd_iu;
2344 2404
@@ -2346,12 +2406,12 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2346 ZFCP_STATUS_COMMON_UNBLOCKED))) 2406 ZFCP_STATUS_COMMON_UNBLOCKED)))
2347 return NULL; 2407 return NULL;
2348 2408
2349 spin_lock(&adapter->req_q.lock); 2409 spin_lock(&adapter->req_q_lock);
2350 if (!atomic_read(&adapter->req_q.count)) 2410 if (!zfcp_fsf_sbal_available(adapter))
2351 goto out; 2411 goto out;
2352 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2412 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2353 adapter->pool.fsf_req_scsi); 2413 adapter->pool.fsf_req_scsi);
2354 if (unlikely(IS_ERR(req))) 2414 if (IS_ERR(req))
2355 goto out; 2415 goto out;
2356 2416
2357 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; 2417 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
@@ -2362,7 +2422,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2362 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2422 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2363 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2423 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2364 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2424 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2365 sizeof(fcp_dl_t); 2425 sizeof(u32);
2366 2426
2367 sbale = zfcp_qdio_sbale_req(req); 2427 sbale = zfcp_qdio_sbale_req(req);
2368 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; 2428 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
@@ -2379,7 +2439,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2379 zfcp_fsf_req_free(req); 2439 zfcp_fsf_req_free(req);
2380 req = NULL; 2440 req = NULL;
2381out: 2441out:
2382 spin_unlock(&adapter->req_q.lock); 2442 spin_unlock(&adapter->req_q_lock);
2383 return req; 2443 return req;
2384} 2444}
2385 2445
@@ -2398,7 +2458,7 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2398struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, 2458struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2399 struct zfcp_fsf_cfdc *fsf_cfdc) 2459 struct zfcp_fsf_cfdc *fsf_cfdc)
2400{ 2460{
2401 volatile struct qdio_buffer_element *sbale; 2461 struct qdio_buffer_element *sbale;
2402 struct zfcp_fsf_req *req = NULL; 2462 struct zfcp_fsf_req *req = NULL;
2403 struct fsf_qtcb_bottom_support *bottom; 2463 struct fsf_qtcb_bottom_support *bottom;
2404 int direction, retval = -EIO, bytes; 2464 int direction, retval = -EIO, bytes;
@@ -2417,12 +2477,12 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2417 return ERR_PTR(-EINVAL); 2477 return ERR_PTR(-EINVAL);
2418 } 2478 }
2419 2479
2420 spin_lock(&adapter->req_q.lock); 2480 spin_lock_bh(&adapter->req_q_lock);
2421 if (zfcp_fsf_req_sbal_get(adapter)) 2481 if (zfcp_fsf_req_sbal_get(adapter))
2422 goto out; 2482 goto out;
2423 2483
2424 req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL); 2484 req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
2425 if (unlikely(IS_ERR(req))) { 2485 if (IS_ERR(req)) {
2426 retval = -EPERM; 2486 retval = -EPERM;
2427 goto out; 2487 goto out;
2428 } 2488 }
@@ -2447,7 +2507,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2447 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2507 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2448 retval = zfcp_fsf_req_send(req); 2508 retval = zfcp_fsf_req_send(req);
2449out: 2509out:
2450 spin_unlock(&adapter->req_q.lock); 2510 spin_unlock_bh(&adapter->req_q_lock);
2451 2511
2452 if (!retval) { 2512 if (!retval) {
2453 wait_event(req->completion_wq, 2513 wait_event(req->completion_wq,
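
The zfcp_fsf.c hunk above moves the FCP_DL setup into a local helper, zfcp_set_fcp_dl(), whose comment describes the offset arithmetic: FCP_DL sits directly behind the fixed part of the FCP command IU plus the variable-length additional CDB. The following stand-alone C sketch only illustrates that pointer math; the structure layout, field sizes and names here are simplified stand-ins, not the kernel's struct fcp_cmnd_iu.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the FCP command IU -- NOT the kernel layout.
 * The real structure has a fixed part (LUN, task attributes, 16-byte CDB)
 * followed by add_fcp_cdb_length words of additional CDB bytes; FCP_DL
 * (the expected data length) comes immediately after that variable part.
 */
struct fcp_cmnd_sketch {
	uint8_t fixed[28];            /* hypothetical fixed-size header */
	uint8_t add_fcp_cdb_length;   /* extra CDB length, in 4-byte words */
	uint8_t var[64];              /* additional CDB bytes, then FCP_DL */
};

static void set_fcp_dl(struct fcp_cmnd_sketch *cmd, uint32_t fcp_dl)
{
	/* FCP_DL starts right after the additional CDB bytes */
	uint8_t *dl = cmd->var + (cmd->add_fcp_cdb_length << 2);

	memcpy(dl, &fcp_dl, sizeof(fcp_dl)); /* memcpy avoids unaligned stores */
}

int main(void)
{
	struct fcp_cmnd_sketch cmd = { .add_fcp_cdb_length = 2 };

	set_fcp_dl(&cmd, 4096);
	printf("FCP_DL stored at byte offset %zu of the command IU\n",
	       offsetof(struct fcp_cmnd_sketch, var) +
	       ((size_t)cmd.add_fcp_cdb_length << 2));
	return 0;
}

The kernel helper reaches the same spot by starting from &fcp_cmd[1], i.e. the end of the fixed structure, and then advancing by add_fcp_cdb_length 4-byte words, as the comment in the hunk states.
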
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index bf94b4da0763..fd3a88777ac8 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -71,13 +71,6 @@
71#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041 71#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
72#define FSF_ELS_COMMAND_REJECTED 0x00000050 72#define FSF_ELS_COMMAND_REJECTED 0x00000050
73#define FSF_GENERIC_COMMAND_REJECTED 0x00000051 73#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
74#define FSF_OPERATION_PARTIALLY_SUCCESSFUL 0x00000052
75#define FSF_AUTHORIZATION_FAILURE 0x00000053
76#define FSF_CFDC_ERROR_DETECTED 0x00000054
77#define FSF_CONTROL_FILE_UPDATE_ERROR 0x00000055
78#define FSF_CONTROL_FILE_TOO_LARGE 0x00000056
79#define FSF_ACCESS_CONFLICT_DETECTED 0x00000057
80#define FSF_CONFLICTS_OVERRULED 0x00000058
81#define FSF_PORT_BOXED 0x00000059 74#define FSF_PORT_BOXED 0x00000059
82#define FSF_LUN_BOXED 0x0000005A 75#define FSF_LUN_BOXED 0x0000005A
83#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B 76#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
@@ -85,9 +78,7 @@
85#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061 78#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
86#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062 79#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
87#define FSF_SBAL_MISMATCH 0x00000063 80#define FSF_SBAL_MISMATCH 0x00000063
88#define FSF_OPEN_PORT_WITHOUT_PRLI 0x00000064
89#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD 81#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
90#define FSF_FCP_RSP_AVAILABLE 0x000000AF
91#define FSF_UNKNOWN_COMMAND 0x000000E2 82#define FSF_UNKNOWN_COMMAND 0x000000E2
92#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 83#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
93#define FSF_INVALID_COMMAND_OPTION 0x000000E5 84#define FSF_INVALID_COMMAND_OPTION 0x000000E5
@@ -102,20 +93,9 @@
102#define FSF_SQ_RETRY_IF_POSSIBLE 0x02 93#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
103#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03 94#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
104#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04 95#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
105#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
106#define FSF_SQ_COMMAND_ABORTED 0x06 96#define FSF_SQ_COMMAND_ABORTED 0x06
107#define FSF_SQ_NO_RETRY_POSSIBLE 0x07 97#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
108 98
109/* FSF status qualifier for CFDC commands */
110#define FSF_SQ_CFDC_HARDENED_ON_SE 0x00000000
111#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE 0x00000001
112#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2 0x00000002
113/* CFDC subtable codes */
114#define FSF_SQ_CFDC_SUBTABLE_OS 0x0001
115#define FSF_SQ_CFDC_SUBTABLE_PORT_WWPN 0x0002
116#define FSF_SQ_CFDC_SUBTABLE_PORT_DID 0x0003
117#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
118
119/* FSF status qualifier (most significant 4 bytes), local link down */ 99/* FSF status qualifier (most significant 4 bytes), local link down */
120#define FSF_PSQ_LINK_NO_LIGHT 0x00000004 100#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
121#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008 101#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
@@ -145,7 +125,6 @@
145#define FSF_STATUS_READ_LINK_UP 0x00000006 125#define FSF_STATUS_READ_LINK_UP 0x00000006
146#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009 126#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009
147#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A 127#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
148#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
149#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C 128#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
150 129
151/* status subtypes in status read buffer */ 130/* status subtypes in status read buffer */
@@ -159,20 +138,9 @@
159 138
160/* status subtypes for unsolicited status notification lost */ 139/* status subtypes for unsolicited status notification lost */
161#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001 140#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001
162#define FSF_STATUS_READ_SUB_SENSE_DATA 0x00000002
163#define FSF_STATUS_READ_SUB_LINK_STATUS 0x00000004
164#define FSF_STATUS_READ_SUB_PORT_CLOSED 0x00000008
165#define FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD 0x00000010
166#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020 141#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020
167#define FSF_STATUS_READ_SUB_ACT_HARDENED 0x00000040
168#define FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT 0x00000080
169
170/* status subtypes for CFDC */
171#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
172#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
173 142
174/* topology that is detected by the adapter */ 143
175#define FSF_TOPO_ERROR 0x00000000
176#define FSF_TOPO_P2P 0x00000001 144#define FSF_TOPO_P2P 0x00000001
177#define FSF_TOPO_FABRIC 0x00000002 145#define FSF_TOPO_FABRIC 0x00000002
178#define FSF_TOPO_AL 0x00000003 146#define FSF_TOPO_AL 0x00000003
@@ -180,17 +148,13 @@
180/* data direction for FCP commands */ 148/* data direction for FCP commands */
181#define FSF_DATADIR_WRITE 0x00000001 149#define FSF_DATADIR_WRITE 0x00000001
182#define FSF_DATADIR_READ 0x00000002 150#define FSF_DATADIR_READ 0x00000002
183#define FSF_DATADIR_READ_WRITE 0x00000003
184#define FSF_DATADIR_CMND 0x00000004 151#define FSF_DATADIR_CMND 0x00000004
185 152
186/* fc service class */ 153/* fc service class */
187#define FSF_CLASS_1 0x00000001
188#define FSF_CLASS_2 0x00000002
189#define FSF_CLASS_3 0x00000003 154#define FSF_CLASS_3 0x00000003
190 155
191/* SBAL chaining */ 156/* SBAL chaining */
192#define FSF_MAX_SBALS_PER_REQ 36 157#define FSF_MAX_SBALS_PER_REQ 36
193#define FSF_MAX_SBALS_PER_ELS_REQ 2
194 158
195/* logging space behind QTCB */ 159/* logging space behind QTCB */
196#define FSF_QTCB_LOG_SIZE 1024 160#define FSF_QTCB_LOG_SIZE 1024
@@ -200,50 +164,16 @@
200#define FSF_FEATURE_LUN_SHARING 0x00000004 164#define FSF_FEATURE_LUN_SHARING 0x00000004
201#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008 165#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
202#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 166#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
203#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
204#define FSF_FEATURE_UPDATE_ALERT 0x00000100 167#define FSF_FEATURE_UPDATE_ALERT 0x00000100
205#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 168#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
206 169
207/* host connection features */ 170/* host connection features */
208#define FSF_FEATURE_NPIV_MODE 0x00000001 171#define FSF_FEATURE_NPIV_MODE 0x00000001
209#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
210 172
211/* option */ 173/* option */
212#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001 174#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
213#define FSF_OPEN_LUN_REPLICATE_SENSE 0x00000002
214
215/* adapter types */
216#define FSF_ADAPTER_TYPE_FICON 0x00000001
217#define FSF_ADAPTER_TYPE_FICON_EXPRESS 0x00000002
218
219/* port types */
220#define FSF_HBA_PORTTYPE_UNKNOWN 0x00000001
221#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
222#define FSF_HBA_PORTTYPE_NPORT 0x00000005
223#define FSF_HBA_PORTTYPE_PTP 0x00000021
224/* following are not defined and used by FSF Spec
225 but are additionally defined by FC-HBA */
226#define FSF_HBA_PORTTYPE_OTHER 0x00000002
227#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
228#define FSF_HBA_PORTTYPE_NLPORT 0x00000006
229#define FSF_HBA_PORTTYPE_FLPORT 0x00000007
230#define FSF_HBA_PORTTYPE_FPORT 0x00000008
231#define FSF_HBA_PORTTYPE_LPORT 0x00000020
232
233/* port states */
234#define FSF_HBA_PORTSTATE_UNKNOWN 0x00000001
235#define FSF_HBA_PORTSTATE_ONLINE 0x00000002
236#define FSF_HBA_PORTSTATE_OFFLINE 0x00000003
237#define FSF_HBA_PORTSTATE_LINKDOWN 0x00000006
238#define FSF_HBA_PORTSTATE_ERROR 0x00000007
239
240/* IO states of adapter */
241#define FSF_IOSTAT_NPORT_RJT 0x00000004
242#define FSF_IOSTAT_FABRIC_RJT 0x00000005
243#define FSF_IOSTAT_LS_RJT 0x00000009
244 175
245/* open LUN access flags*/ 176/* open LUN access flags*/
246#define FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED 0x01000000
247#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000 177#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
248#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000 178#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
249 179
@@ -265,11 +195,6 @@ struct fsf_queue_designator {
265 u32 res1; 195 u32 res1;
266} __attribute__ ((packed)); 196} __attribute__ ((packed));
267 197
268struct fsf_port_closed_payload {
269 struct fsf_queue_designator queue_designator;
270 u32 port_handle;
271} __attribute__ ((packed));
272
273struct fsf_bit_error_payload { 198struct fsf_bit_error_payload {
274 u32 res1; 199 u32 res1;
275 u32 link_failure_error_count; 200 u32 link_failure_error_count;
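
Among the constants zfcp_fsf.h keeps are FSF_UNIT_ACCESS_EXCLUSIVE and FSF_UNIT_ACCESS_OUTBOUND_TRANSFER, which the open-LUN handler earlier in this patch uses to reject exclusive read-only and shared read-write LUN configurations. Below is a minimal sketch of that classification; the helper name and return strings are illustrative only, the flag values are the ones visible in the header above, and the real handler additionally derives the bits from the FSF status qualifier, fails the unit and triggers error recovery.

#include <stdbool.h>
#include <stdio.h>

#define FSF_UNIT_ACCESS_EXCLUSIVE         0x02000000u /* from zfcp_fsf.h above */
#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000u /* from zfcp_fsf.h above */

/* Classify the LUN access bits the way the open-LUN handler does:
 * exclusive access without write capability and shared access with
 * write capability are both rejected; anything else is usable,
 * possibly read-only.
 */
static const char *classify_lun_access(unsigned int access_bits)
{
	bool exclusive = access_bits & FSF_UNIT_ACCESS_EXCLUSIVE;
	bool readwrite = access_bits & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER;

	if (exclusive && !readwrite)
		return "rejected: exclusive read-only access not supported";
	if (!exclusive && readwrite)
		return "rejected: shared read-write access not supported";
	return readwrite ? "usable, read-write" : "usable, read-only";
}

int main(void)
{
	printf("%s\n", classify_lun_access(FSF_UNIT_ACCESS_EXCLUSIVE |
					   FSF_UNIT_ACCESS_OUTBOUND_TRANSFER));
	printf("%s\n", classify_lun_access(FSF_UNIT_ACCESS_EXCLUSIVE));
	printf("%s\n", classify_lun_access(FSF_UNIT_ACCESS_OUTBOUND_TRANSFER));
	return 0;
}
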
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index d6dbd653fde9..3e05080e62d4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -28,7 +28,7 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
28 return 0; 28 return 0;
29} 29}
30 30
31static volatile struct qdio_buffer_element * 31static struct qdio_buffer_element *
32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) 32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
33{ 33{
34 return &q->sbal[sbal_idx]->element[sbale_idx]; 34 return &q->sbal[sbal_idx]->element[sbale_idx];
@@ -57,7 +57,7 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter)
57 57
58static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id) 58static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
59{ 59{
60 dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n"); 60 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
61 61
62 zfcp_erp_adapter_reopen(adapter, 62 zfcp_erp_adapter_reopen(adapter,
63 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 63 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
@@ -145,7 +145,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
145{ 145{
146 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; 146 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
147 struct zfcp_qdio_queue *queue = &adapter->resp_q; 147 struct zfcp_qdio_queue *queue = &adapter->resp_q;
148 volatile struct qdio_buffer_element *sbale; 148 struct qdio_buffer_element *sbale;
149 int sbal_idx, sbale_idx, sbal_no; 149 int sbal_idx, sbale_idx, sbal_no;
150 150
151 if (unlikely(qdio_err)) { 151 if (unlikely(qdio_err)) {
@@ -174,8 +174,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
174 174
175 if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) 175 if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
176 dev_warn(&adapter->ccw_device->dev, 176 dev_warn(&adapter->ccw_device->dev,
177 "Protocol violation by adapter. " 177 "A QDIO protocol error occurred, "
178 "Continuing operations.\n"); 178 "operations continue\n");
179 } 179 }
180 180
181 /* 181 /*
@@ -190,8 +190,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
190 * @fsf_req: pointer to struct fsf_req 190 * @fsf_req: pointer to struct fsf_req
191 * Returns: pointer to qdio_buffer_element (SBALE) structure 191 * Returns: pointer to qdio_buffer_element (SBALE) structure
192 */ 192 */
193volatile struct qdio_buffer_element * 193struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
194zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
195{ 194{
196 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); 195 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
197} 196}
@@ -201,8 +200,7 @@ zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
201 * @fsf_req: pointer to struct fsf_req 200 * @fsf_req: pointer to struct fsf_req
202 * Returns: pointer to qdio_buffer_element (SBALE) structure 201 * Returns: pointer to qdio_buffer_element (SBALE) structure
203 */ 202 */
204volatile struct qdio_buffer_element * 203struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
205zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
206{ 204{
207 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 205 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
208 req->sbale_curr); 206 req->sbale_curr);
@@ -216,10 +214,10 @@ static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
216 % QDIO_MAX_BUFFERS_PER_Q; 214 % QDIO_MAX_BUFFERS_PER_Q;
217} 215}
218 216
219static volatile struct qdio_buffer_element * 217static struct qdio_buffer_element *
220zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 218zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
221{ 219{
222 volatile struct qdio_buffer_element *sbale; 220 struct qdio_buffer_element *sbale;
223 221
224 /* set last entry flag in current SBALE of current SBAL */ 222 /* set last entry flag in current SBALE of current SBAL */
225 sbale = zfcp_qdio_sbale_curr(fsf_req); 223 sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -250,7 +248,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
250 return sbale; 248 return sbale;
251} 249}
252 250
253static volatile struct qdio_buffer_element * 251static struct qdio_buffer_element *
254zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 252zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
255{ 253{
256 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 254 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -273,7 +271,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
273 unsigned int sbtype, void *start_addr, 271 unsigned int sbtype, void *start_addr,
274 unsigned int total_length) 272 unsigned int total_length)
275{ 273{
276 volatile struct qdio_buffer_element *sbale; 274 struct qdio_buffer_element *sbale;
277 unsigned long remaining, length; 275 unsigned long remaining, length;
278 void *addr; 276 void *addr;
279 277
@@ -282,6 +280,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
282 addr += length, remaining -= length) { 280 addr += length, remaining -= length) {
283 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); 281 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
284 if (!sbale) { 282 if (!sbale) {
283 atomic_inc(&fsf_req->adapter->qdio_outb_full);
285 zfcp_qdio_undo_sbals(fsf_req); 284 zfcp_qdio_undo_sbals(fsf_req);
286 return -EINVAL; 285 return -EINVAL;
287 } 286 }
@@ -307,7 +306,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
307int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 306int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
308 struct scatterlist *sg, int max_sbals) 307 struct scatterlist *sg, int max_sbals)
309{ 308{
310 volatile struct qdio_buffer_element *sbale; 309 struct qdio_buffer_element *sbale;
311 int retval, bytes = 0; 310 int retval, bytes = 0;
312 311
313 /* figure out last allowed SBAL */ 312 /* figure out last allowed SBAL */
@@ -344,10 +343,10 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
344 int first = fsf_req->sbal_first; 343 int first = fsf_req->sbal_first;
345 int count = fsf_req->sbal_number; 344 int count = fsf_req->sbal_number;
346 int retval, pci, pci_batch; 345 int retval, pci, pci_batch;
347 volatile struct qdio_buffer_element *sbale; 346 struct qdio_buffer_element *sbale;
348 347
349 /* acknowledgements for transferred buffers */ 348 /* acknowledgements for transferred buffers */
350 pci_batch = req_q->pci_batch + count; 349 pci_batch = adapter->req_q_pci_batch + count;
351 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) { 350 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
352 pci_batch %= ZFCP_QDIO_PCI_INTERVAL; 351 pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
353 pci = first + count - (pci_batch + 1); 352 pci = first + count - (pci_batch + 1);
@@ -367,7 +366,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
367 atomic_sub(count, &req_q->count); 366 atomic_sub(count, &req_q->count);
368 req_q->first += count; 367 req_q->first += count;
369 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; 368 req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
370 req_q->pci_batch = pci_batch; 369 adapter->req_q_pci_batch = pci_batch;
371 return 0; 370 return 0;
372} 371}
373 372
@@ -418,14 +417,14 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
418 struct zfcp_qdio_queue *req_q; 417 struct zfcp_qdio_queue *req_q;
419 int first, count; 418 int first, count;
420 419
421 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 420 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
422 return; 421 return;
423 422
424 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 423 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
425 req_q = &adapter->req_q; 424 req_q = &adapter->req_q;
426 spin_lock(&req_q->lock); 425 spin_lock_bh(&adapter->req_q_lock);
427 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 426 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
428 spin_unlock(&req_q->lock); 427 spin_unlock_bh(&adapter->req_q_lock);
429 428
430 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 429 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
431 430
@@ -438,7 +437,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
438 } 437 }
439 req_q->first = 0; 438 req_q->first = 0;
440 atomic_set(&req_q->count, 0); 439 atomic_set(&req_q->count, 0);
441 req_q->pci_batch = 0; 440 adapter->req_q_pci_batch = 0;
442 adapter->resp_q.first = 0; 441 adapter->resp_q.first = 0;
443 atomic_set(&adapter->resp_q.count, 0); 442 atomic_set(&adapter->resp_q.count, 0);
444} 443}
@@ -450,23 +449,17 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
450 */ 449 */
451int zfcp_qdio_open(struct zfcp_adapter *adapter) 450int zfcp_qdio_open(struct zfcp_adapter *adapter)
452{ 451{
453 volatile struct qdio_buffer_element *sbale; 452 struct qdio_buffer_element *sbale;
454 int cc; 453 int cc;
455 454
456 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 455 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
457 return -EIO; 456 return -EIO;
458 457
459 if (qdio_establish(&adapter->qdio_init_data)) { 458 if (qdio_establish(&adapter->qdio_init_data))
460 dev_err(&adapter->ccw_device->dev, 459 goto failed_establish;
461 "Establish of QDIO queues failed.\n");
462 return -EIO;
463 }
464 460
465 if (qdio_activate(adapter->ccw_device)) { 461 if (qdio_activate(adapter->ccw_device))
466 dev_err(&adapter->ccw_device->dev,
467 "Activate of QDIO queues failed.\n");
468 goto failed_qdio; 462 goto failed_qdio;
469 }
470 463
471 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 464 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
472 sbale = &(adapter->resp_q.sbal[cc]->element[0]); 465 sbale = &(adapter->resp_q.sbal[cc]->element[0]);
@@ -476,20 +469,20 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
476 } 469 }
477 470
478 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, 471 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
479 QDIO_MAX_BUFFERS_PER_Q)) { 472 QDIO_MAX_BUFFERS_PER_Q))
480 dev_err(&adapter->ccw_device->dev,
481 "Init of QDIO response queue failed.\n");
482 goto failed_qdio; 473 goto failed_qdio;
483 }
484 474
485 /* set index of first available SBALS / number of available SBALS */ 475
486 adapter->req_q.first = 0; 476 adapter->req_q.first = 0;
487 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); 477 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
488 adapter->req_q.pci_batch = 0; 478 adapter->req_q_pci_batch = 0;
489 479
490 return 0; 480 return 0;
491 481
492failed_qdio: 482failed_qdio:
493 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 483 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
484failed_establish:
485 dev_err(&adapter->ccw_device->dev,
486 "Setting up the QDIO connection to the FCP adapter failed\n");
494 return -EIO; 487 return -EIO;
495} 488}
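The zfcp_qdio_open() rework above collapses the per-step error messages into two goto labels that fall through to a single consolidated dev_err. A minimal userspace sketch of that goto-unwinding pattern follows; the names (setup_queues, plain malloc'd buffers) are invented stand-ins for the real QDIO setup steps, not driver code:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: invented names, not zfcp code. */
static int setup_queues(void)
{
	char *req_q, *resp_q;

	req_q = malloc(4096);
	if (!req_q)
		goto failed_establish;	/* nothing to undo yet */

	resp_q = malloc(4096);
	if (!resp_q)
		goto failed_alloc;	/* undo only the first allocation */

	printf("queues ready\n");
	free(resp_q);
	free(req_q);
	return 0;

failed_alloc:
	free(req_q);
failed_establish:
	fprintf(stderr, "setting up the queues failed\n");
	return -1;
}

int main(void)
{
	return setup_queues() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The point of the shared failed_establish label is that every failure path reports through the same message while still undoing only what was actually set up.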
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index aeae56b00b45..ca8f85f3dad4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -21,20 +21,6 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
 	return fcp_sns_info_ptr;
 }
 
-void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
-{
-	fcp_dl_t *fcp_dl_ptr;
-
-	/*
-	 * fcp_dl_addr = start address of fcp_cmnd structure +
-	 * size of fixed part + size of dynamically sized add_dcp_cdb field
-	 * SEE FCP-2 documentation
-	 */
-	fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] +
-				   (fcp_cmd->add_fcp_cdb_length << 2));
-	*fcp_dl_ptr = fcp_dl;
-}
-
 static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
 {
 	struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -119,13 +105,17 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
 {
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
+	int scsi_lun;
 
 	list_for_each_entry(port, &adapter->port_list_head, list) {
 		if (!port->rport || (id != port->rport->scsi_target_id))
 			continue;
-		list_for_each_entry(unit, &port->unit_list_head, list)
-			if (lun == unit->scsi_lun)
+		list_for_each_entry(unit, &port->unit_list_head, list) {
+			scsi_lun = scsilun_to_int(
+				(struct scsi_lun *)&unit->fcp_lun);
+			if (lun == scsi_lun)
 				return unit;
+		}
 	}
 
 	return NULL;
@@ -183,7 +173,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 		return retval;
 	}
 	fsf_req->data = NULL;
-	fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
 
 	/* don't access old fsf_req after releasing the abort_lock */
 	write_unlock_irqrestore(&adapter->abort_lock, flags);
@@ -294,7 +283,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 				  sizeof (struct zfcp_adapter *));
 	if (!adapter->scsi_host) {
 		dev_err(&adapter->ccw_device->dev,
-			"registration with SCSI stack failed.");
+			"Registering the FCP device with the "
+			"SCSI stack failed\n");
 		return -EIO;
 	}
 
@@ -312,7 +302,6 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 		scsi_host_put(adapter->scsi_host);
 		return -EIO;
 	}
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
 
 	return 0;
 }
@@ -336,7 +325,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
 	scsi_remove_host(shost);
 	scsi_host_put(shost);
 	adapter->scsi_host = NULL;
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
 
 	return;
 }
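zfcp_unit_lookup() above now converts the unit's stored 8-byte FCP LUN with scsilun_to_int() at lookup time rather than comparing a cached scsi_lun. The following is a plain userspace approximation of that folding as I read it (the LUN is consumed two bytes at a time, the first byte of each pair being the more significant); fcp_lun_to_int and the sample LUN value are invented for the example and are neither driver nor kernel code:

#include <stdio.h>
#include <stdint.h>

/* Userspace approximation of 8-byte FCP LUN -> flat LUN folding;
 * fcp_lun_to_int is an invented name, not a kernel symbol. */
static uint64_t fcp_lun_to_int(const uint8_t lun[8])
{
	uint64_t res = 0;
	int i;

	for (i = 0; i < 8; i += 2)
		res |= ((uint64_t)lun[i] << ((i + 1) * 8)) |
		       ((uint64_t)lun[i + 1] << (i * 8));
	return res;
}

int main(void)
{
	/* LUN 1 in peripheral device addressing: 0x00 0x01 followed by zeros */
	const uint8_t lun[8] = { 0x00, 0x01, 0, 0, 0, 0, 0, 0 };

	printf("flat LUN: %llu\n", (unsigned long long)fcp_lun_to_int(lun));
	return 0;
}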
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 2e85c6c49e7d..ca9293ba1766 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -26,9 +26,9 @@ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
 ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
 		 atomic_read(&adapter->status));
 ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
-		 adapter->peer_wwnn);
+		 (unsigned long long) adapter->peer_wwnn);
 ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
-		 adapter->peer_wwpn);
+		 (unsigned long long) adapter->peer_wwpn);
 ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
 		 adapter->peer_d_id);
 ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
@@ -135,8 +135,9 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 {
 	struct zfcp_adapter *adapter = dev_get_drvdata(dev);
 	struct zfcp_port *port;
-	wwn_t wwpn;
+	u64 wwpn;
 	int retval = 0;
+	LIST_HEAD(port_remove_lh);
 
 	down(&zfcp_data.config_sema);
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
@@ -144,7 +145,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 		goto out;
 	}
 
-	if (strict_strtoull(buf, 0, &wwpn)) {
+	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
 		retval = -EINVAL;
 		goto out;
 	}
@@ -154,7 +155,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 	if (port && (atomic_read(&port->refcount) == 0)) {
 		zfcp_port_get(port);
 		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
-		list_move(&port->list, &adapter->port_remove_lh);
+		list_move(&port->list, &port_remove_lh);
 	} else
 		port = NULL;
 	write_unlock_irq(&zfcp_data.config_lock);
@@ -200,7 +201,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 {
 	struct zfcp_port *port = dev_get_drvdata(dev);
 	struct zfcp_unit *unit;
-	fcp_lun_t fcp_lun;
+	u64 fcp_lun;
 	int retval = -EINVAL;
 
 	down(&zfcp_data.config_sema);
@@ -209,7 +210,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 		goto out;
 	}
 
-	if (strict_strtoull(buf, 0, &fcp_lun))
+	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		goto out;
 
 	unit = zfcp_unit_enqueue(port, fcp_lun);
@@ -233,8 +234,9 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
 {
 	struct zfcp_port *port = dev_get_drvdata(dev);
 	struct zfcp_unit *unit;
-	fcp_lun_t fcp_lun;
+	u64 fcp_lun;
 	int retval = 0;
+	LIST_HEAD(unit_remove_lh);
 
 	down(&zfcp_data.config_sema);
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
@@ -242,7 +244,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
 		goto out;
 	}
 
-	if (strict_strtoull(buf, 0, &fcp_lun)) {
+	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
 		retval = -EINVAL;
 		goto out;
 	}
@@ -252,7 +254,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
 	if (unit && (atomic_read(&unit->refcount) == 0)) {
 		zfcp_unit_get(unit);
 		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
-		list_move(&unit->list, &port->unit_remove_lh);
+		list_move(&unit->list, &unit_remove_lh);
 	} else
 		unit = NULL;
 
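The port_remove and unit_remove store routines above stop using remove lists embedded in the adapter and port objects; instead each declares a stack-local LIST_HEAD, moves the victim onto it while holding config_lock, and does the teardown only after the lock is dropped. Below is a rough userspace sketch of that pattern with a hand-rolled singly linked list and a pthread mutex; all names are invented and nothing here is zfcp code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented demo types; not the zfcp structures. */
struct entry {
	int id;
	struct entry *next;
};

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *global_list;

static void remove_entry(int id)
{
	struct entry *remove_list = NULL, **pp, *e;

	/* under the lock: only unlink and park on the local list */
	pthread_mutex_lock(&config_lock);
	for (pp = &global_list; (e = *pp); pp = &e->next)
		if (e->id == id) {
			*pp = e->next;
			e->next = remove_list;
			remove_list = e;
			break;
		}
	pthread_mutex_unlock(&config_lock);

	/* teardown happens outside the lock */
	while ((e = remove_list)) {
		remove_list = e->next;
		printf("removing entry %d\n", e->id);
		free(e);
	}
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return EXIT_FAILURE;
	e->id = 42;
	e->next = NULL;
	global_list = e;
	remove_entry(42);
	return EXIT_SUCCESS;
}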
@@ -273,22 +275,7 @@ out:
 }
 static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
 
-static struct attribute *zfcp_port_ns_attrs[] = {
-	&dev_attr_port_failed.attr,
-	&dev_attr_port_in_recovery.attr,
-	&dev_attr_port_status.attr,
-	&dev_attr_port_access_denied.attr,
-	NULL
-};
-
-/**
- * zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver
- */
-struct attribute_group zfcp_sysfs_ns_port_attrs = {
-	.attrs = zfcp_port_ns_attrs,
-};
-
-static struct attribute *zfcp_port_no_ns_attrs[] = {
+static struct attribute *zfcp_port_attrs[] = {
 	&dev_attr_unit_add.attr,
 	&dev_attr_unit_remove.attr,
 	&dev_attr_port_failed.attr,
@@ -302,7 +289,7 @@ static struct attribute *zfcp_port_no_ns_attrs[] = {
  * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
  */
 struct attribute_group zfcp_sysfs_port_attrs = {
-	.attrs = zfcp_port_no_ns_attrs,
+	.attrs = zfcp_port_attrs,
 };
 
 static struct attribute *zfcp_unit_attrs[] = {
@@ -394,9 +381,11 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
 static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
 
 ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
-		      unit->port->adapter->ccw_device->dev.bus_id);
-ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
-ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
+		      dev_name(&unit->port->adapter->ccw_device->dev));
+ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
+		      (unsigned long long) unit->port->wwpn);
+ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
+		      (unsigned long long) unit->fcp_lun);
 
 struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
 	&dev_attr_fcp_lun,
@@ -487,10 +476,23 @@ ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
 ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
 		(unsigned long long) stat_info.seconds_act);
 
+static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct Scsi_Host *scsi_host = class_to_shost(dev);
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) scsi_host->hostdata[0];
+
+	return sprintf(buf, "%d\n", atomic_read(&adapter->qdio_outb_full));
+}
+static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
+
 struct device_attribute *zfcp_sysfs_shost_attrs[] = {
 	&dev_attr_utilization,
 	&dev_attr_requests,
 	&dev_attr_megabytes,
 	&dev_attr_seconds_active,
+	&dev_attr_queue_full,
 	NULL
 };
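The new queue_full host attribute above is a read-only show routine that formats a single atomic counter (qdio_outb_full, which by its name counts how often the outbound queue was found full). A small C11 userspace sketch of that shape, with placeholder names rather than the kernel sysfs API:

#include <stdio.h>
#include <stdatomic.h>

/* Placeholder counter; in the driver this is adapter->qdio_outb_full. */
static atomic_int qdio_outb_full;

/* Shape of a read-only show routine: format one atomic counter. */
static int queue_full_show(char *buf, size_t len)
{
	return snprintf(buf, len, "%d\n", atomic_load(&qdio_outb_full));
}

int main(void)
{
	char buf[16];

	atomic_fetch_add(&qdio_outb_full, 3);	/* pretend the queue filled up three times */
	queue_full_show(buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}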