Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c | 352
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 66
-rw-r--r--  drivers/s390/block/dasd_alias.c | 80
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 27
-rw-r--r--  drivers/s390/block/dasd_diag.c | 48
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 217
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 4
-rw-r--r--  drivers/s390/block/dasd_eer.c | 6
-rw-r--r--  drivers/s390/block/dasd_fba.c | 21
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 4
-rw-r--r--  drivers/s390/block/dasd_int.h | 21
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 32
-rw-r--r--  drivers/s390/block/dasd_proc.c | 124
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 4
-rw-r--r--  drivers/s390/char/con3215.c | 18
-rw-r--r--  drivers/s390/char/con3270.c | 2
-rw-r--r--  drivers/s390/char/fs3270.c | 32
-rw-r--r--  drivers/s390/char/keyboard.c | 1
-rw-r--r--  drivers/s390/char/monreader.c | 11
-rw-r--r--  drivers/s390/char/monwriter.c | 10
-rw-r--r--  drivers/s390/char/raw3270.c | 2
-rw-r--r--  drivers/s390/char/sclp.c | 4
-rw-r--r--  drivers/s390/char/sclp_async.c | 7
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 10
-rw-r--r--  drivers/s390/char/sclp_con.c | 1
-rw-r--r--  drivers/s390/char/sclp_tty.c | 2
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 5
-rw-r--r--  drivers/s390/char/tape.h | 9
-rw-r--r--  drivers/s390/char/tape_34xx.c | 10
-rw-r--r--  drivers/s390/char/tape_3590.c | 6
-rw-r--r--  drivers/s390/char/tape_block.c | 60
-rw-r--r--  drivers/s390/char/tape_char.c | 75
-rw-r--r--  drivers/s390/char/tape_class.c | 6
-rw-r--r--  drivers/s390/char/tape_core.c | 68
-rw-r--r--  drivers/s390/char/tape_proc.c | 5
-rw-r--r--  drivers/s390/char/tape_std.c | 3
-rw-r--r--  drivers/s390/char/tty3270.c | 20
-rw-r--r--  drivers/s390/char/vmcp.c | 13
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 11
-rw-r--r--  drivers/s390/char/vmur.c | 4
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 30
-rw-r--r--  drivers/s390/char/zcore.c | 201
-rw-r--r--  drivers/s390/cio/Makefile | 4
-rw-r--r--  drivers/s390/cio/blacklist.c | 1
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 2
-rw-r--r--  drivers/s390/cio/ccwreq.c | 325
-rw-r--r--  drivers/s390/cio/chp.c | 3
-rw-r--r--  drivers/s390/cio/chsc.c | 31
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 30
-rw-r--r--  drivers/s390/cio/cio.c | 33
-rw-r--r--  drivers/s390/cio/cio.h | 8
-rw-r--r--  drivers/s390/cio/cmf.c | 2
-rw-r--r--  drivers/s390/cio/crw.c | 29
-rw-r--r--  drivers/s390/cio/css.c | 152
-rw-r--r--  drivers/s390/cio/css.h | 8
-rw-r--r--  drivers/s390/cio/device.c | 1160
-rw-r--r--  drivers/s390/cio/device.h | 28
-rw-r--r--  drivers/s390/cio/device_fsm.c | 456
-rw-r--r--  drivers/s390/cio/device_id.c | 375
-rw-r--r--  drivers/s390/cio/device_ops.c | 142
-rw-r--r--  drivers/s390/cio/device_pgid.c | 970
-rw-r--r--  drivers/s390/cio/device_status.c | 3
-rw-r--r--  drivers/s390/cio/fcx.c | 4
-rw-r--r--  drivers/s390/cio/io_sch.h | 74
-rw-r--r--  drivers/s390/cio/qdio.h | 120
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 136
-rw-r--r--  drivers/s390/cio/qdio_main.c | 108
-rw-r--r--  drivers/s390/cio/qdio_perf.c | 147
-rw-r--r--  drivers/s390/cio/qdio_perf.h | 61
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 30
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 13
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 32
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 18
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 164
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 76
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c | 3
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 5
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 41
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 5
-rw-r--r--  drivers/s390/net/Kconfig | 10
-rw-r--r--  drivers/s390/net/Makefile | 7
-rw-r--r--  drivers/s390/net/claw.c | 82
-rw-r--r--  drivers/s390/net/claw.h | 12
-rw-r--r--  drivers/s390/net/ctcm_dbug.c | 1
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 1
-rw-r--r--  drivers/s390/net/ctcm_fsms.h | 1
-rw-r--r--  drivers/s390/net/ctcm_main.c | 168
-rw-r--r--  drivers/s390/net/ctcm_main.h | 20
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 1
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c | 12
-rw-r--r--  drivers/s390/net/cu3088.c | 148
-rw-r--r--  drivers/s390/net/cu3088.h | 41
-rw-r--r--  drivers/s390/net/fsm.c | 2
-rw-r--r--  drivers/s390/net/fsm.h | 2
-rw-r--r--  drivers/s390/net/lcs.c | 116
-rw-r--r--  drivers/s390/net/lcs.h | 18
-rw-r--r--  drivers/s390/net/netiucv.c | 14
-rw-r--r--  drivers/s390/net/qeth_core.h | 16
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 403
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 91
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 100
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 80
-rw-r--r--  drivers/s390/net/qeth_l3.h | 4
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 343
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 125
-rw-r--r--  drivers/s390/net/smsgiucv.c | 18
-rw-r--r--  drivers/s390/net/smsgiucv.h | 8
-rw-r--r--  drivers/s390/net/smsgiucv_app.c | 212
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 445
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 182
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 36
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 157
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 44
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 437
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 162
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 48
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 714
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 262
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 520
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 53
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 53
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 109
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h | 183
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 180
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 246
127 files changed, 6762 insertions, 5560 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index aaccc8ecfa8f..fa2339cb1681 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -20,12 +20,13 @@
 #include <linux/buffer_head.h>
 #include <linux/hdreg.h>
 #include <linux/async.h>
+#include <linux/mutex.h>
 
 #include <asm/ccwdev.h>
 #include <asm/ebcdic.h>
 #include <asm/idals.h>
-#include <asm/todclk.h>
 #include <asm/itcw.h>
+#include <asm/diag.h>
 
 /* This is ugly... */
 #define PRINTK_HEADER "dasd:"
@@ -36,6 +37,9 @@
  */
 #define DASD_CHANQ_MAX_SIZE 4
 
+#define DASD_SLEEPON_START_TAG	(void *) 1
+#define DASD_SLEEPON_END_TAG	(void *) 2
+
 /*
  * SECTION: exported variables of dasd.c
  */
@@ -64,6 +68,7 @@ static void do_restore_device(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 static void dasd_device_timeout(unsigned long);
 static void dasd_block_timeout(unsigned long);
+static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -112,6 +117,7 @@ struct dasd_device *dasd_alloc_device(void)
 	INIT_WORK(&device->restore_device, do_restore_device);
 	device->state = DASD_STATE_NEW;
 	device->target = DASD_STATE_NEW;
+	mutex_init(&device->state_mutex);
 
 	return device;
 }
@@ -321,8 +327,8 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
 		device->state = DASD_STATE_READY;
 		return rc;
 	}
-	dasd_destroy_partitions(block);
 	dasd_flush_request_queue(block);
+	dasd_destroy_partitions(block);
 	block->blocks = 0;
 	block->bp_block = 0;
 	block->s2b_shift = 0;
@@ -484,10 +490,8 @@ static void dasd_change_state(struct dasd_device *device)
 	if (rc)
 		device->target = device->state;
 
-	if (device->state == device->target) {
+	if (device->state == device->target)
 		wake_up(&dasd_init_waitq);
-		dasd_put_device(device);
-	}
 
 	/* let user-space know that the device status changed */
 	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
@@ -502,7 +506,9 @@ static void dasd_change_state(struct dasd_device *device)
 static void do_kick_device(struct work_struct *work)
 {
 	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
+	mutex_lock(&device->state_mutex);
 	dasd_change_state(device);
+	mutex_unlock(&device->state_mutex);
 	dasd_schedule_device_bh(device);
 	dasd_put_device(device);
 }
@@ -539,18 +545,19 @@ void dasd_restore_device(struct dasd_device *device)
 void dasd_set_target_state(struct dasd_device *device, int target)
 {
 	dasd_get_device(device);
+	mutex_lock(&device->state_mutex);
 	/* If we are in probeonly mode stop at DASD_STATE_READY. */
 	if (dasd_probeonly && target > DASD_STATE_READY)
 		target = DASD_STATE_READY;
 	if (device->target != target) {
-		if (device->state == target) {
+		if (device->state == target)
 			wake_up(&dasd_init_waitq);
-			dasd_put_device(device);
-		}
 		device->target = target;
 	}
 	if (device->state != device->target)
 		dasd_change_state(device);
+	mutex_unlock(&device->state_mutex);
+	dasd_put_device(device);
 }
 
 /*
@@ -960,7 +967,7 @@ static void dasd_device_timeout(unsigned long ptr)
 	device = (struct dasd_device *) ptr;
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	/* re-activate request queue */
-	device->stopped &= ~DASD_STOPPED_PENDING;
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	dasd_schedule_device_bh(device);
 }
@@ -994,19 +1001,26 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
 		return;
 	cqr = (struct dasd_ccw_req *) intparm;
 	if (cqr->status != DASD_CQR_IN_IO) {
-		DBF_EVENT(DBF_DEBUG,
-			  "invalid status in handle_killed_request: "
-			  "bus_id %s, status %02x",
-			  dev_name(&cdev->dev), cqr->status);
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
+				"invalid status in handle_killed_request: "
+				"%02x", cqr->status);
 		return;
 	}
 
-	device = (struct dasd_device *) cqr->startdev;
-	if (device == NULL ||
-	    device != dasd_device_from_cdev_locked(cdev) ||
-	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
-		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
-			      "bus_id %s", dev_name(&cdev->dev));
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device)) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"unable to get device from cdev");
+		return;
+	}
+
+	if (!cqr->startdev ||
+	    device != cqr->startdev ||
+	    strncmp(cqr->startdev->discipline->ebcname,
+		    (char *) &cqr->magic, 4)) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"invalid device in request");
+		dasd_put_device(device);
 		return;
 	}
 
@@ -1023,7 +1037,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
 	/* First of all start sense subsystem status request. */
 	dasd_eer_snss(device);
 
-	device->stopped &= ~DASD_STOPPED_PENDING;
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 	dasd_schedule_device_bh(device);
 	if (device->block)
 		dasd_schedule_block_bh(device->block);
@@ -1045,12 +1059,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	case -EIO:
 		break;
 	case -ETIMEDOUT:
-		DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
-			  __func__, dev_name(&cdev->dev));
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+				"request timed out\n", __func__);
 		break;
 	default:
-		DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
-			  __func__, dev_name(&cdev->dev), PTR_ERR(irb));
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+				"unknown error %ld\n", __func__,
+				PTR_ERR(irb));
 	}
 	dasd_handle_killed_request(cdev, intparm);
 	return;
@@ -1078,8 +1093,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	device = (struct dasd_device *) cqr->startdev;
 	if (!device ||
 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
-		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
-			      "bus_id %s", dev_name(&cdev->dev));
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"invalid device in request");
 		return;
 	}
 
@@ -1405,6 +1420,20 @@ void dasd_schedule_device_bh(struct dasd_device *device)
 	tasklet_hi_schedule(&device->tasklet);
 }
 
+void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
+{
+	device->stopped |= bits;
+}
+EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
+
+void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
+{
+	device->stopped &= ~bits;
+	if (!device->stopped)
+		wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
+
 /*
  * Queue a request to the head of the device ccw_queue.
  * Start the I/O if possible.
@@ -1446,7 +1475,10 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
  */
 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
 {
-	wake_up((wait_queue_head_t *) data);
+	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	cqr->callback_data = DASD_SLEEPON_END_TAG;
+	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	wake_up(&generic_waitq);
 }
 
 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
@@ -1456,67 +1488,141 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
 
 	device = cqr->startdev;
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	rc = ((cqr->status == DASD_CQR_DONE ||
-	       cqr->status == DASD_CQR_NEED_ERP ||
-	       cqr->status == DASD_CQR_TERMINATED) &&
-	      list_empty(&cqr->devlist));
+	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	return rc;
 }
 
 /*
- * Queue a request to the tail of the device ccw_queue and wait for
- * it's completion.
+ * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
  */
-int dasd_sleep_on(struct dasd_ccw_req *cqr)
+static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
-	int rc;
+	dasd_erp_fn_t erp_fn;
 
+	if (cqr->status == DASD_CQR_FILLED)
+		return 0;
 	device = cqr->startdev;
+	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+		if (cqr->status == DASD_CQR_TERMINATED) {
+			device->discipline->handle_terminated_request(cqr);
+			return 1;
+		}
+		if (cqr->status == DASD_CQR_NEED_ERP) {
+			erp_fn = device->discipline->erp_action(cqr);
+			erp_fn(cqr);
+			return 1;
+		}
+		if (cqr->status == DASD_CQR_FAILED)
+			dasd_log_sense(cqr, &cqr->irb);
+		if (cqr->refers) {
+			__dasd_process_erp(device, cqr);
+			return 1;
+		}
+	}
+	return 0;
+}
 
-	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &generic_waitq;
-	dasd_add_request_tail(cqr);
-	wait_event(generic_waitq, _wait_for_wakeup(cqr));
+static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
+{
+	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+		if (cqr->refers) /* erp is not done yet */
+			return 1;
+		return ((cqr->status != DASD_CQR_DONE) &&
+			(cqr->status != DASD_CQR_FAILED));
+	} else
+		return (cqr->status == DASD_CQR_FILLED);
+}
 
-	if (cqr->status == DASD_CQR_DONE)
+static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
+{
+	struct dasd_device *device;
+	int rc;
+	struct list_head ccw_queue;
+	struct dasd_ccw_req *cqr;
+
+	INIT_LIST_HEAD(&ccw_queue);
+	maincqr->status = DASD_CQR_FILLED;
+	device = maincqr->startdev;
+	list_add(&maincqr->blocklist, &ccw_queue);
+	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
+	     cqr = list_first_entry(&ccw_queue,
+				    struct dasd_ccw_req, blocklist)) {
+
+		if (__dasd_sleep_on_erp(cqr))
+			continue;
+		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
+			continue;
+
+		/* Non-temporary stop condition will trigger fail fast */
+		if (device->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    (!dasd_eer_enabled(device))) {
+			cqr->status = DASD_CQR_FAILED;
+			continue;
+		}
+
+		/* Don't try to start requests if device is stopped */
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, !(device->stopped));
+			if (rc == -ERESTARTSYS) {
+				cqr->status = DASD_CQR_FAILED;
+				maincqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, !(device->stopped));
+
+		cqr->callback = dasd_wakeup_cb;
+		cqr->callback_data = DASD_SLEEPON_START_TAG;
+		dasd_add_request_tail(cqr);
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, _wait_for_wakeup(cqr));
+			if (rc == -ERESTARTSYS) {
+				dasd_cancel_req(cqr);
+				/* wait (non-interruptible) for final status */
+				wait_event(generic_waitq,
+					   _wait_for_wakeup(cqr));
+				cqr->status = DASD_CQR_FAILED;
+				maincqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, _wait_for_wakeup(cqr));
+	}
+
+	maincqr->endclk = get_clock();
+	if ((maincqr->status != DASD_CQR_DONE) &&
+	    (maincqr->intrc != -ERESTARTSYS))
+		dasd_log_sense(maincqr, &maincqr->irb);
+	if (maincqr->status == DASD_CQR_DONE)
 		rc = 0;
-	else if (cqr->intrc)
-		rc = cqr->intrc;
+	else if (maincqr->intrc)
+		rc = maincqr->intrc;
 	else
 		rc = -EIO;
 	return rc;
 }
 
 /*
+ * Queue a request to the tail of the device ccw_queue and wait for
+ * it's completion.
+ */
+int dasd_sleep_on(struct dasd_ccw_req *cqr)
+{
+	return _dasd_sleep_on(cqr, 0);
+}
+
+/*
  * Queue a request to the tail of the device ccw_queue and wait
  * interruptible for it's completion.
  */
 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 {
-	struct dasd_device *device;
-	int rc;
-
-	device = cqr->startdev;
-	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &generic_waitq;
-	dasd_add_request_tail(cqr);
-	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
-	if (rc == -ERESTARTSYS) {
-		dasd_cancel_req(cqr);
-		/* wait (non-interruptible) for final status */
-		wait_event(generic_waitq, _wait_for_wakeup(cqr));
-		cqr->intrc = rc;
-	}
-
-	if (cqr->status == DASD_CQR_DONE)
-		rc = 0;
-	else if (cqr->intrc)
-		rc = cqr->intrc;
-	else
-		rc = -EIO;
-	return rc;
+	return _dasd_sleep_on(cqr, 1);
 }
 
 /*
@@ -1549,7 +1655,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 	}
 
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &generic_waitq;
+	cqr->callback_data = DASD_SLEEPON_START_TAG;
 	cqr->status = DASD_CQR_QUEUED;
 	list_add(&cqr->devlist, &device->ccw_queue);
 
@@ -1601,7 +1707,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 				cqr, rc);
 		} else {
 			cqr->stopclk = get_clock();
-			rc = 1;
 		}
 		break;
 	default: /* already finished or clear pending - do nothing */
@@ -1630,7 +1735,7 @@ static void dasd_block_timeout(unsigned long ptr)
 	block = (struct dasd_block *) ptr;
 	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
 	/* re-activate request queue */
-	block->base->stopped &= ~DASD_STOPPED_PENDING;
+	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
 	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
 	dasd_schedule_block_bh(block);
 }
@@ -1657,11 +1762,10 @@ void dasd_block_clear_timer(struct dasd_block *block)
 /*
  * Process finished error recovery ccw.
  */
-static inline void __dasd_block_process_erp(struct dasd_block *block,
-					    struct dasd_ccw_req *cqr)
+static void __dasd_process_erp(struct dasd_device *device,
+			       struct dasd_ccw_req *cqr)
 {
 	dasd_erp_fn_t erp_fn;
-	struct dasd_device *device = block->base;
 
 	if (cqr->status == DASD_CQR_DONE)
 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
@@ -1725,9 +1829,12 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 			 */
 			if (!list_empty(&block->ccw_queue))
 				break;
-			spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
-			basedev->stopped |= DASD_STOPPED_PENDING;
-			spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
+			spin_lock_irqsave(
+				get_ccwdev_lock(basedev->cdev), flags);
+			dasd_device_set_stop_bits(basedev,
+						  DASD_STOPPED_PENDING);
+			spin_unlock_irqrestore(
+				get_ccwdev_lock(basedev->cdev), flags);
 			dasd_block_set_timer(block, HZ/2);
 			break;
 		}
@@ -1795,7 +1902,8 @@ restart:
 		/* Process requests that may be recovered */
 		if (cqr->status == DASD_CQR_NEED_ERP) {
 			erp_fn = base->discipline->erp_action(cqr);
-			erp_fn(cqr);
+			if (IS_ERR(erp_fn(cqr)))
+				continue;
 			goto restart;
 		}
 
@@ -1813,7 +1921,7 @@ restart:
 			cqr->status = DASD_CQR_FILLED;
 			cqr->retries = 255;
 			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
-			base->stopped |= DASD_STOPPED_QUIESCE;
+			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
 			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
 					       flags);
 			goto restart;
@@ -1821,7 +1929,7 @@ restart:
 
 		/* Process finished ERP request. */
 		if (cqr->refers) {
-			__dasd_block_process_erp(block, cqr);
+			__dasd_process_erp(base, cqr);
 			goto restart;
 		}
 
@@ -1952,7 +2060,7 @@ restart_cb:
 		/* Process finished ERP request. */
 		if (cqr->refers) {
 			spin_lock_bh(&block->queue_lock);
-			__dasd_block_process_erp(block, cqr);
+			__dasd_process_erp(block->base, cqr);
 			spin_unlock_bh(&block->queue_lock);
 			/* restart list_for_xx loop since dasd_process_erp
 			 * might remove multiple elements */
@@ -2036,9 +2144,8 @@ static void dasd_setup_queue(struct dasd_block *block)
 
 	blk_queue_logical_block_size(block->request_queue, block->bp_block);
 	max = block->base->discipline->max_blocks << block->s2b_shift;
-	blk_queue_max_sectors(block->request_queue, max);
-	blk_queue_max_phys_segments(block->request_queue, -1L);
-	blk_queue_max_hw_segments(block->request_queue, -1L);
+	blk_queue_max_hw_sectors(block->request_queue, max);
+	blk_queue_max_segments(block->request_queue, -1L);
 	/* with page sized segments we can translate each segement into
 	 * one idaw/tidaw
 	 */
@@ -2077,9 +2184,13 @@ static void dasd_flush_request_queue(struct dasd_block *block)
 static int dasd_open(struct block_device *bdev, fmode_t mode)
 {
 	struct dasd_block *block = bdev->bd_disk->private_data;
-	struct dasd_device *base = block->base;
+	struct dasd_device *base;
 	int rc;
 
+	if (!block)
+		return -ENODEV;
+
+	base = block->base;
 	atomic_inc(&block->open_count);
 	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
 		rc = -ENODEV;
@@ -2106,6 +2217,13 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
 		goto out;
 	}
 
+	if ((mode & FMODE_WRITE) &&
+	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
+	     (base->features & DASD_FEATURE_READONLY))) {
+		rc = -EROFS;
+		goto out;
+	}
+
 	return 0;
 
 out:
@@ -2183,6 +2301,34 @@ dasd_exit(void)
  * SECTION: common functions for ccw_driver use
  */
 
+/*
+ * Is the device read-only?
+ * Note that this function does not report the setting of the
+ * readonly device attribute, but how it is configured in z/VM.
+ */
+int dasd_device_is_ro(struct dasd_device *device)
+{
+	struct ccw_dev_id dev_id;
+	struct diag210 diag_data;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return 0;
+	ccw_device_get_id(device->cdev, &dev_id);
+	memset(&diag_data, 0, sizeof(diag_data));
+	diag_data.vrdcdvno = dev_id.devno;
+	diag_data.vrdclen = sizeof(diag_data);
+	rc = diag210(&diag_data);
+	if (rc == 0 || rc == 2) {
+		return diag_data.vrdcvfla & 0x80;
+	} else {
+		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
+			  dev_id.devno, rc);
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(dasd_device_is_ro);
+
 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
 {
 	struct ccw_device *cdev = data;
@@ -2192,11 +2338,6 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
 	if (ret)
 		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
 			   dev_name(&cdev->dev), ret);
-	else {
-		struct dasd_device *device = dasd_device_from_cdev(cdev);
-		wait_event(dasd_init_waitq, _wait_for_device(device));
-		dasd_put_device(device);
-	}
 }
 
 /*
@@ -2208,18 +2349,11 @@ int dasd_generic_probe(struct ccw_device *cdev,
 {
 	int ret;
 
-	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
-	if (ret) {
-		DBF_EVENT(DBF_WARNING,
-		       "dasd_generic_probe: could not set ccw-device options "
-		       "for %s\n", dev_name(&cdev->dev));
-		return ret;
-	}
 	ret = dasd_add_sysfs_files(cdev);
 	if (ret) {
-		DBF_EVENT(DBF_WARNING,
-		       "dasd_generic_probe: could not add sysfs entries "
-		       "for %s\n", dev_name(&cdev->dev));
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+				"dasd_generic_probe: could not add "
+				"sysfs entries");
 		return ret;
 	}
 	cdev->handler = &dasd_int_handler;
@@ -2338,6 +2472,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
 	} else
 		pr_debug("dasd_generic device %s found\n",
 			 dev_name(&cdev->dev));
+
+	wait_event(dasd_init_waitq, _wait_for_device(device));
+
 	dasd_put_device(device);
 	return rc;
 }
@@ -2418,16 +2555,16 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 			cqr->status = DASD_CQR_QUEUED;
 			cqr->retries++;
 		}
-		device->stopped |= DASD_STOPPED_DC_WAIT;
+		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
 		dasd_device_clear_timer(device);
 		dasd_schedule_device_bh(device);
 		ret = 1;
 		break;
 	case CIO_OPER:
 		/* FIXME: add a sanity check. */
-		device->stopped &= ~DASD_STOPPED_DC_WAIT;
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
 		if (device->stopped & DASD_UNRESUMED_PM) {
-			device->stopped &= ~DASD_UNRESUMED_PM;
+			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
 			dasd_restore_device(device);
 			ret = 1;
 			break;
@@ -2452,7 +2589,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 	if (IS_ERR(device))
 		return PTR_ERR(device);
 	/* disallow new I/O */
-	device->stopped |= DASD_STOPPED_PM;
+	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
 	/* clear active requests */
 	INIT_LIST_HEAD(&freeze_queue);
 	spin_lock_irq(get_ccwdev_lock(cdev));
@@ -2504,14 +2641,18 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
 		return PTR_ERR(device);
 
 	/* allow new IO again */
-	device->stopped &= ~DASD_STOPPED_PM;
-	device->stopped &= ~DASD_UNRESUMED_PM;
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
 
 	dasd_schedule_device_bh(device);
 
-	if (device->discipline->restore)
+	/*
+	 * call discipline restore function
+	 * if device is stopped do nothing e.g. for disconnected devices
+	 */
+	if (device->discipline->restore && !(device->stopped))
 		rc = device->discipline->restore(device);
-	if (rc)
+	if (rc || device->stopped)
 		/*
 		 * if the resume failed for the DASD we put it in
 		 * an UNRESUMED stop state
@@ -2561,8 +2702,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 	cqr->startdev = device;
 	cqr->memdev = device;
 	cqr->expires = 10*HZ;
-	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-	cqr->retries = 2;
+	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e8ff7b0c961d..6632649dd6aa 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -10,9 +10,7 @@
 #define KMSG_COMPONENT "dasd-eckd"
 
 #include <linux/timer.h>
-#include <linux/slab.h>
 #include <asm/idals.h>
-#include <asm/todclk.h>
 
 #define PRINTK_HEADER "dasd_erp(3990): "
 
@@ -70,8 +68,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
  * processing until the started timer has expired or an related
  * interrupt was received.
  */
-static void
-dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
+static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
 {
 
 	struct dasd_device *device = erp->startdev;
@@ -81,10 +78,13 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
81 "blocking request queue for %is", expires/HZ); 78 "blocking request queue for %is", expires/HZ);
82 79
83 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 80 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
84 device->stopped |= DASD_STOPPED_PENDING; 81 dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
85 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 82 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
86 erp->status = DASD_CQR_FILLED; 83 erp->status = DASD_CQR_FILLED;
87 dasd_block_set_timer(device->block, expires); 84 if (erp->block)
85 dasd_block_set_timer(erp->block, expires);
86 else
87 dasd_device_set_timer(device, expires);
88} 88}
89 89
90/* 90/*
@@ -243,9 +243,13 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
  * DESCRIPTION
  *   Setup ERP to do the ERP action 1 (see Reference manual).
  *   Repeat the operation on a different channel path.
- *   If all alternate paths have been tried, the request is posted with a
- *   permanent error.
- *   Note: duplex handling is not implemented (yet).
+ *   As deviation from the recommended recovery action, we reset the path mask
+ *   after we have tried each path and go through all paths a second time.
+ *   This will cover situations where only one path at a time is actually down,
+ *   but all paths fail and recover just with the same sequence and timing as
+ *   we try to use them (flapping links).
+ *   If all alternate paths have been tried twice, the request is posted with
+ *   a permanent error.
  *
  * PARAMETER
  *   erp		pointer to the current ERP
@@ -254,17 +258,25 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
  *   erp		pointer to the ERP
  *
  */
-static struct dasd_ccw_req *
-dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
+static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
 {
+	erp->function = dasd_3990_erp_action_1_sec;
+	dasd_3990_erp_alternate_path(erp);
+	return erp;
+}
 
+static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
+{
 	erp->function = dasd_3990_erp_action_1;
-
 	dasd_3990_erp_alternate_path(erp);
-
+	if (erp->status == DASD_CQR_FAILED) {
+		erp->status = DASD_CQR_FILLED;
+		erp->retries = 10;
+		erp->lpm = LPM_ANYPATH;
+		erp->function = dasd_3990_erp_action_1_sec;
+	}
 	return erp;
-
-}				/* end dasd_3990_erp_action_1 */
+}				/* end dasd_3990_erp_action_1(b) */
 
 /*
  * DASD_3990_ERP_ACTION_4
@@ -1032,6 +1044,10 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
 
 		erp->retries = 5;
 
+	} else if (sense[1] & SNS1_WRITE_INHIBITED) {
+		dev_err(&device->cdev->dev, "An I/O request was rejected"
+			" because writing is inhibited\n");
+		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
 	} else {
 		/* fatal error -  set status to FAILED
 		   internal error 09 - Command Reject */
@@ -2270,7 +2286,8 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 
 	if (cqr->cpmode == 1) {
 		cplength = 0;
-		datasize = sizeof(struct tcw) + sizeof(struct tsb);
+		/* TCW needs to be 64 byte aligned, so leave enough room */
+		datasize = 64 + sizeof(struct tcw) + sizeof(struct tsb);
 	} else {
 		cplength = 2;
 		datasize = 0;
@@ -2292,17 +2309,21 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 			      cqr->retries);
 			dasd_block_set_timer(device->block, (HZ << 3));
 		}
-		return cqr;
+		return erp;
 	}
 
+	ccw = cqr->cpaddr;
 	if (cqr->cpmode == 1) {
 		/* make a shallow copy of the original tcw but set new tsb */
 		erp->cpmode = 1;
-		erp->cpaddr = erp->data;
-		tcw = erp->data;
+		erp->cpaddr = PTR_ALIGN(erp->data, 64);
+		tcw = erp->cpaddr;
 		tsb = (struct tsb *) &tcw[1];
 		*tcw = *((struct tcw *)cqr->cpaddr);
 		tcw->tsb = (long)tsb;
+	} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
+		/* PSF cannot be chained from NOOP/TIC */
+		erp->cpaddr = cqr->cpaddr;
 	} else {
 		/* initialize request with default TIC to current ERP/CQR */
 		ccw = erp->cpaddr;
@@ -2351,6 +2372,9 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
 	/* add erp and initialize with default TIC */
 	erp = dasd_3990_erp_add_erp(cqr);
 
+	if (IS_ERR(erp))
+		return erp;
+
 	/* inspect sense, determine specific ERP if possible */
 	if (erp != cqr) {
 
@@ -2487,6 +2511,8 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
 
 		erp = dasd_3990_erp_action_1(erp);
 
+	} else if (erp->function == dasd_3990_erp_action_1_sec) {
+		erp = dasd_3990_erp_action_1_sec(erp);
 	} else if (erp->function == dasd_3990_erp_action_5) {
 
 		/* retries have not been successful */
@@ -2688,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 	if (erp == NULL) {
 		/* no matching erp found - set up erp */
 		erp = dasd_3990_erp_additional_erp(cqr);
+		if (IS_ERR(erp))
+			return erp;
 	} else {
 		/* matching erp found - set all leading erp's to DONE */
 		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 70a008c00522..8c4814258e93 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -8,6 +8,7 @@
 #define KMSG_COMPONENT "dasd-eckd"
 
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <asm/ebcdic.h>
 #include "dasd_int.h"
 #include "dasd_eckd.h"
@@ -152,6 +153,7 @@ static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
 	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
 	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
 	spin_lock_init(&lcu->lock);
+	init_completion(&lcu->lcu_setup);
 	return lcu;
 
 out_err4:
@@ -217,7 +219,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 	spin_unlock_irqrestore(&aliastree.lock, flags);
 	newlcu = _allocate_lcu(uid);
 	if (IS_ERR(newlcu))
-		return PTR_ERR(lcu);
+		return PTR_ERR(newlcu);
 	spin_lock_irqsave(&aliastree.lock, flags);
 	lcu = _find_lcu(server, uid);
 	if (!lcu) {
@@ -240,6 +242,67 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 }
 
 /*
+ * The first device to be registered on an LCU will have to do
+ * some additional setup steps to configure that LCU on the
+ * storage server. All further devices should wait with their
+ * initialization until the first device is done.
+ * To synchronize this work, the first device will call
+ * dasd_alias_lcu_setup_complete when it is done, and all
+ * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
+ */
+void dasd_alias_lcu_setup_complete(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+	struct alias_server *server;
+	struct alias_lcu *lcu;
+	struct dasd_uid *uid;
+
+	private = (struct dasd_eckd_private *) device->private;
+	uid = &private->uid;
+	lcu = NULL;
+	spin_lock_irqsave(&aliastree.lock, flags);
+	server = _find_server(uid);
+	if (server)
+		lcu = _find_lcu(server, uid);
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+	if (!lcu) {
+		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+				"could not find lcu for %04x %02x",
+				uid->ssid, uid->real_unit_addr);
+		WARN_ON(1);
+		return;
+	}
+	complete_all(&lcu->lcu_setup);
+}
+
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+	struct alias_server *server;
+	struct alias_lcu *lcu;
+	struct dasd_uid *uid;
+
+	private = (struct dasd_eckd_private *) device->private;
+	uid = &private->uid;
+	lcu = NULL;
+	spin_lock_irqsave(&aliastree.lock, flags);
+	server = _find_server(uid);
+	if (server)
+		lcu = _find_lcu(server, uid);
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+	if (!lcu) {
+		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+				"could not find lcu for %04x %02x",
+				uid->ssid, uid->real_unit_addr);
+		WARN_ON(1);
+		return;
+	}
+	wait_for_completion(&lcu->lcu_setup);
+}
+
+/*
  * This function removes a device from the scope of alias management.
  * The complicated part is to make sure that it is not in use by
  * any of the workers. If necessary cancel the work.
@@ -755,11 +818,11 @@ static void __stop_device_on_lcu(struct dasd_device *device,
 {
 	/* If pos == device then device is already locked! */
 	if (pos == device) {
-		pos->stopped |= DASD_STOPPED_SU;
+		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
 		return;
 	}
 	spin_lock(get_ccwdev_lock(pos->cdev));
-	pos->stopped |= DASD_STOPPED_SU;
+	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
 	spin_unlock(get_ccwdev_lock(pos->cdev));
 }
 
@@ -793,26 +856,26 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
 
 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-		device->stopped &= ~DASD_STOPPED_SU;
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
 
 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-		device->stopped &= ~DASD_STOPPED_SU;
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
 
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-			device->stopped &= ~DASD_STOPPED_SU;
+			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
 					       flags);
 		}
 		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-			device->stopped &= ~DASD_STOPPED_SU;
+			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
 					       flags);
 		}
@@ -836,7 +899,8 @@ static void summary_unit_check_handling_work(struct work_struct *work)
 
 	/* 2. reset summary unit check */
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	reset_summary_unit_check(lcu, device, suc_data->reason);
 
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 4cac5b54f26a..eff9c812c5c2 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -18,6 +18,7 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 
 #include <asm/debug.h>
 #include <asm/uaccess.h>
@@ -742,6 +743,7 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
 	struct dasd_devmap *devmap;
+	struct dasd_device *device;
 	int val;
 	char *endp;
 
@@ -758,12 +760,14 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
 		devmap->features |= DASD_FEATURE_READONLY;
 	else
 		devmap->features &= ~DASD_FEATURE_READONLY;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	if (devmap->device && devmap->device->block
-	    && devmap->device->block->gdp)
-		set_disk_ro(devmap->device->block->gdp, val);
+	device = devmap->device;
+	if (device) {
+		device->features = devmap->features;
+		val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+	}
 	spin_unlock(&dasd_devmap_lock);
+	if (device && device->block && device->block->gdp)
+		set_disk_ro(device->block->gdp, val);
 	return count;
 }
 
@@ -874,12 +878,19 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
 	ssize_t len;
 
 	device = dasd_device_from_cdev(to_ccwdev(dev));
-	if (!IS_ERR(device) && device->discipline) {
+	if (IS_ERR(device))
+		goto out;
+	else if (!device->discipline) {
+		dasd_put_device(device);
+		goto out;
+	} else {
 		len = snprintf(buf, PAGE_SIZE, "%s\n",
 			       device->discipline->name);
 		dasd_put_device(device);
-	} else
-		len = snprintf(buf, PAGE_SIZE, "none\n");
+		return len;
+	}
+out:
+	len = snprintf(buf, PAGE_SIZE, "none\n");
 	return len;
 }
 
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 4e49b4a6c880..687f323cdc38 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
  *
  */
 
-#define KMSG_COMPONENT "dasd-diag"
+#define KMSG_COMPONENT "dasd"
 
 #include <linux/stddef.h>
 #include <linux/kernel.h>
@@ -24,7 +24,6 @@
 #include <asm/ebcdic.h>
 #include <asm/io.h>
 #include <asm/s390_ext.h>
-#include <asm/todclk.h>
 #include <asm/vtoc.h>
 #include <asm/diag.h>
 
@@ -145,9 +144,16 @@ dasd_diag_erp(struct dasd_device *device)
 
 	mdsk_term_io(device);
 	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
+	if (rc == 4) {
+		if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
+			pr_warning("%s: The access mode of a DIAG device "
+				   "changed to read-only\n",
+				   dev_name(&device->cdev->dev));
+		rc = 0;
+	}
 	if (rc)
-		dev_warn(&device->cdev->dev, "DIAG ERP failed with "
-			    "rc=%d\n", rc);
+		pr_warning("%s: DIAG ERP failed with "
+			    "rc=%d\n", dev_name(&device->cdev->dev), rc);
 }
 
 /* Start a given request at the device. Return zero on success, non-zero
@@ -363,8 +369,9 @@ dasd_diag_check_device(struct dasd_device *device)
 		private->pt_block = 2;
 		break;
 	default:
-		dev_warn(&device->cdev->dev, "Device type %d is not supported "
-			    "in DIAG mode\n", private->rdc_data.vdev_class);
+		pr_warning("%s: Device type %d is not supported "
+			   "in DIAG mode\n", dev_name(&device->cdev->dev),
+			   private->rdc_data.vdev_class);
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
@@ -405,8 +412,8 @@ dasd_diag_check_device(struct dasd_device *device)
 	private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
 	rc = dia250(&private->iob, RW_BIO);
 	if (rc == 3) {
-		dev_warn(&device->cdev->dev,
-			 "A 64-bit DIAG call failed\n");
+		pr_warning("%s: A 64-bit DIAG call failed\n",
+			   dev_name(&device->cdev->dev));
 		rc = -EOPNOTSUPP;
 		goto out_label;
 	}
@@ -415,8 +422,9 @@ dasd_diag_check_device(struct dasd_device *device)
 			break;
 	}
 	if (bsize > PAGE_SIZE) {
-		dev_warn(&device->cdev->dev, "Accessing the DASD failed because"
-			 " of an incorrect format (rc=%d)\n", rc);
+		pr_warning("%s: Accessing the DASD failed because of an "
+			   "incorrect format (rc=%d)\n",
+			   dev_name(&device->cdev->dev), rc);
 		rc = -EIO;
 		goto out_label;
 	}
@@ -433,16 +441,20 @@ dasd_diag_check_device(struct dasd_device *device)
433 for (sb = 512; sb < bsize; sb = sb << 1) 441 for (sb = 512; sb < bsize; sb = sb << 1)
434 block->s2b_shift++; 442 block->s2b_shift++;
435 rc = mdsk_init_io(device, block->bp_block, 0, NULL); 443 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
436 if (rc) { 444 if (rc && (rc != 4)) {
437 dev_warn(&device->cdev->dev, "DIAG initialization " 445 pr_warning("%s: DIAG initialization failed with rc=%d\n",
438 "failed with rc=%d\n", rc); 446 dev_name(&device->cdev->dev), rc);
439 rc = -EIO; 447 rc = -EIO;
440 } else { 448 } else {
441 dev_info(&device->cdev->dev, 449 if (rc == 4)
442 "New DASD with %ld byte/block, total size %ld KB\n", 450 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
443 (unsigned long) block->bp_block, 451 pr_info("%s: New DASD with %ld byte/block, total size %ld "
444 (unsigned long) (block->blocks << 452 "KB%s\n", dev_name(&device->cdev->dev),
445 block->s2b_shift) >> 1); 453 (unsigned long) block->bp_block,
454 (unsigned long) (block->blocks <<
455 block->s2b_shift) >> 1,
456 (rc == 4) ? ", read-only device" : "");
457 rc = 0;
446 } 458 }
447out_label: 459out_label:
448 free_page((long) label); 460 free_page((long) label);
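The initialization path above derives s2b_shift, the sector-to-block shift: with a 4096-byte block size the loop runs for sb = 512, 1024, 2048 and yields 3, so blocks << s2b_shift converts device blocks to 512-byte sectors and the final >> 1 converts sectors to KB for the message. The same computation as a standalone sketch:

	/* Sketch: sectors-per-block shift for a power-of-two block size.
	 * bsize == 4096 gives shift == 3, i.e. 8 sectors per block.
	 */
	static int sectors_per_block_shift(unsigned int bsize)
	{
		unsigned int sb;
		int shift = 0;

		for (sb = 512; sb < bsize; sb = sb << 1)
			shift++;
		return shift;
	}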
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 417b97cd3f94..0cb233116855 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -23,8 +23,8 @@
23#include <asm/debug.h> 23#include <asm/debug.h>
24#include <asm/idals.h> 24#include <asm/idals.h>
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/compat.h>
26#include <asm/io.h> 27#include <asm/io.h>
27#include <asm/todclk.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/cio.h> 29#include <asm/cio.h>
30#include <asm/ccwdev.h> 30#include <asm/ccwdev.h>
@@ -78,6 +78,11 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
78 78
79static struct ccw_driver dasd_eckd_driver; /* see below */ 79static struct ccw_driver dasd_eckd_driver; /* see below */
80 80
81#define INIT_CQR_OK 0
82#define INIT_CQR_UNFORMATTED 1
83#define INIT_CQR_ERROR 2
84
85
81/* initial attempt at a probe function. this can be simplified once 86/* initial attempt at a probe function. this can be simplified once
82 * the other detection code is gone */ 87 * the other detection code is gone */
83static int 88static int
@@ -86,11 +91,12 @@ dasd_eckd_probe (struct ccw_device *cdev)
86 int ret; 91 int ret;
87 92
88 /* set ECKD specific ccw-device options */ 93 /* set ECKD specific ccw-device options */
89 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE); 94 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
95 CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
90 if (ret) { 96 if (ret) {
91 DBF_EVENT(DBF_WARNING, 97 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
92 "dasd_eckd_probe: could not set ccw-device options " 98 "dasd_eckd_probe: could not set "
93 "for %s\n", dev_name(&cdev->dev)); 99 "ccw-device options");
94 return ret; 100 return ret;
95 } 101 }
96 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline); 102 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
@@ -749,8 +755,7 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
749 cqr->block = NULL; 755 cqr->block = NULL;
750 cqr->expires = 10*HZ; 756 cqr->expires = 10*HZ;
751 cqr->lpm = lpm; 757 cqr->lpm = lpm;
752 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 758 cqr->retries = 256;
753 cqr->retries = 2;
754 cqr->buildclk = get_clock(); 759 cqr->buildclk = get_clock();
755 cqr->status = DASD_CQR_FILLED; 760 cqr->status = DASD_CQR_FILLED;
756 return cqr; 761 return cqr;
@@ -885,16 +890,15 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
885 rc = dasd_eckd_read_conf_lpm(device, &conf_data, 890 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
886 &conf_len, lpm); 891 &conf_len, lpm);
887 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ 892 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
888 DBF_EVENT(DBF_WARNING, 893 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
889 "Read configuration data returned " 894 "Read configuration data returned "
890 "error %d for device: %s", rc, 895 "error %d", rc);
891 dev_name(&device->cdev->dev));
892 return rc; 896 return rc;
893 } 897 }
894 if (conf_data == NULL) { 898 if (conf_data == NULL) {
895 DBF_EVENT(DBF_WARNING, "No configuration " 899 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
896 "data retrieved for device: %s", 900 "No configuration data "
897 dev_name(&device->cdev->dev)); 901 "retrieved");
898 continue; /* no error */ 902 continue; /* no error */
899 } 903 }
900 /* save first valid configuration data */ 904 /* save first valid configuration data */
@@ -941,16 +945,14 @@ static int dasd_eckd_read_features(struct dasd_device *device)
941 sizeof(struct dasd_rssd_features)), 945 sizeof(struct dasd_rssd_features)),
942 device); 946 device);
943 if (IS_ERR(cqr)) { 947 if (IS_ERR(cqr)) {
944 DBF_EVENT(DBF_WARNING, "Could not allocate initialization " 948 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
945 "request for device: %s", 949 "allocate initialization request");
946 dev_name(&device->cdev->dev));
947 return PTR_ERR(cqr); 950 return PTR_ERR(cqr);
948 } 951 }
949 cqr->startdev = device; 952 cqr->startdev = device;
950 cqr->memdev = device; 953 cqr->memdev = device;
951 cqr->block = NULL; 954 cqr->block = NULL;
952 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 955 cqr->retries = 256;
953 cqr->retries = 5;
954 cqr->expires = 10 * HZ; 956 cqr->expires = 10 * HZ;
955 957
956 /* Prepare for Read Subsystem Data */ 958 /* Prepare for Read Subsystem Data */
@@ -1012,9 +1014,9 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1012 } 1014 }
1013 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data; 1015 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1014 psf_ssc_data->order = PSF_ORDER_SSC; 1016 psf_ssc_data->order = PSF_ORDER_SSC;
1015 psf_ssc_data->suborder = 0x40; 1017 psf_ssc_data->suborder = 0xc0;
1016 if (enable_pav) { 1018 if (enable_pav) {
1017 psf_ssc_data->suborder |= 0x88; 1019 psf_ssc_data->suborder |= 0x08;
1018 psf_ssc_data->reserved[0] = 0x88; 1020 psf_ssc_data->reserved[0] = 0x88;
1019 } 1021 }
1020 ccw = cqr->cpaddr; 1022 ccw = cqr->cpaddr;
@@ -1025,6 +1027,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1025 cqr->startdev = device; 1027 cqr->startdev = device;
1026 cqr->memdev = device; 1028 cqr->memdev = device;
1027 cqr->block = NULL; 1029 cqr->block = NULL;
1030 cqr->retries = 256;
1028 cqr->expires = 10*HZ; 1031 cqr->expires = 10*HZ;
1029 cqr->buildclk = get_clock(); 1032 cqr->buildclk = get_clock();
1030 cqr->status = DASD_CQR_FILLED; 1033 cqr->status = DASD_CQR_FILLED;
@@ -1057,7 +1060,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
1057/* 1060/*
1058 * Validate storage server of current device. 1061 * Validate storage server of current device.
1059 */ 1062 */
1060static int dasd_eckd_validate_server(struct dasd_device *device) 1063static void dasd_eckd_validate_server(struct dasd_device *device)
1061{ 1064{
1062 int rc; 1065 int rc;
1063 struct dasd_eckd_private *private; 1066 struct dasd_eckd_private *private;
@@ -1068,15 +1071,12 @@ static int dasd_eckd_validate_server(struct dasd_device *device)
1068 else 1071 else
1069 enable_pav = 1; 1072 enable_pav = 1;
1070 rc = dasd_eckd_psf_ssc(device, enable_pav); 1073 rc = dasd_eckd_psf_ssc(device, enable_pav);
1074
1071 /* maybe the requested feature is not available on the server, 1075 /* maybe the requested feature is not available on the server,
1072 * therefore just report the error and go ahead */ 1076 * therefore just report the error and go ahead */
1073 private = (struct dasd_eckd_private *) device->private; 1077 private = (struct dasd_eckd_private *) device->private;
1074 DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x " 1078 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1075 "returned rc=%d for device: %s", 1079 "returned rc=%d", private->uid.ssid, rc);
1076 private->uid.vendor, private->uid.serial,
1077 private->uid.ssid, rc, dev_name(&device->cdev->dev));
1078 /* RE-Read Configuration Data */
1079 return dasd_eckd_read_conf(device);
1080} 1080}
1081 1081
1082/* 1082/*
@@ -1089,7 +1089,17 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1089 struct dasd_eckd_private *private; 1089 struct dasd_eckd_private *private;
1090 struct dasd_block *block; 1090 struct dasd_block *block;
1091 int is_known, rc; 1091 int is_known, rc;
1092 int readonly;
1092 1093
1094 if (!ccw_device_is_pathgroup(device->cdev)) {
1095 dev_warn(&device->cdev->dev,
1096 "A channel path group could not be established\n");
1097 return -EIO;
1098 }
1099 if (!ccw_device_is_multipath(device->cdev)) {
1100 dev_info(&device->cdev->dev,
1101 "The DASD is not operating in multipath mode\n");
1102 }
1093 private = (struct dasd_eckd_private *) device->private; 1103 private = (struct dasd_eckd_private *) device->private;
1094 if (!private) { 1104 if (!private) {
1095 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); 1105 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
@@ -1123,9 +1133,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1123 if (private->uid.type == UA_BASE_DEVICE) { 1133 if (private->uid.type == UA_BASE_DEVICE) {
1124 block = dasd_alloc_block(); 1134 block = dasd_alloc_block();
1125 if (IS_ERR(block)) { 1135 if (IS_ERR(block)) {
1126 DBF_EVENT(DBF_WARNING, "could not allocate dasd " 1136 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1127 "block structure for device: %s", 1137 "could not allocate dasd "
1128 dev_name(&device->cdev->dev)); 1138 "block structure");
1129 rc = PTR_ERR(block); 1139 rc = PTR_ERR(block);
1130 goto out_err1; 1140 goto out_err1;
1131 } 1141 }
@@ -1139,12 +1149,21 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1139 rc = is_known; 1149 rc = is_known;
1140 goto out_err2; 1150 goto out_err2;
1141 } 1151 }
1152 /*
1153 * dasd_eckd_validate_server is done on the first device that
1154 * is found for an LCU. All other devices have to wait for it,
1155 * so that they will read the correct feature codes.
1156 */
1142 if (!is_known) { 1157 if (!is_known) {
1143 /* new lcu found */ 1158 dasd_eckd_validate_server(device);
1144 rc = dasd_eckd_validate_server(device); /* will switch pav on */ 1159 dasd_alias_lcu_setup_complete(device);
1145 if (rc) 1160 } else
1146 goto out_err3; 1161 dasd_alias_wait_for_lcu_setup(device);
1147 } 1162
1163 /* device may report different configuration data after LCU setup */
1164 rc = dasd_eckd_read_conf(device);
1165 if (rc)
1166 goto out_err3;
1148 1167
1149 /* Read Feature Codes */ 1168 /* Read Feature Codes */
1150 dasd_eckd_read_features(device); 1169 dasd_eckd_read_features(device);
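The rework above serializes LCU setup: the first device found for an LCU runs dasd_eckd_validate_server() and signals completion, every later device waits for that signal, and all of them re-read their configuration data afterwards so they see the feature codes that validate_server may have changed. A hedged sketch of the underlying pattern with the struct completion added to struct alias_lcu below (the lcu type and the setup helper here are simplified stand-ins for the alias code):

	/* Sketch: first caller performs the one-time setup, later
	 * callers block until it has finished.
	 */
	struct lcu {
		struct completion setup;	/* init_completion() at alloc time */
	};

	static void lcu_setup(struct lcu *lcu, int is_first)
	{
		if (is_first) {
			do_one_time_setup();		/* hypothetical, e.g. PSF-SSC */
			complete_all(&lcu->setup);	/* wake current and future waiters */
		} else {
			wait_for_completion(&lcu->setup);
		}
	}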
@@ -1153,9 +1172,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1153 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 1172 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1154 &private->rdc_data, 64); 1173 &private->rdc_data, 64);
1155 if (rc) { 1174 if (rc) {
1156 DBF_EVENT(DBF_WARNING, 1175 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1157 "Read device characteristics failed, rc=%d for " 1176 "Read device characteristic failed, rc=%d", rc);
1158 "device: %s", rc, dev_name(&device->cdev->dev));
1159 goto out_err3; 1177 goto out_err3;
1160 } 1178 }
1161 /* find the valid cylinder size */ 1179 /* find the valid cylinder size */
@@ -1165,15 +1183,20 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1165 else 1183 else
1166 private->real_cyl = private->rdc_data.no_cyl; 1184 private->real_cyl = private->rdc_data.no_cyl;
1167 1185
1186 readonly = dasd_device_is_ro(device);
1187 if (readonly)
1188 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
1189
1168 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " 1190 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
1169 "with %d cylinders, %d heads, %d sectors\n", 1191 "with %d cylinders, %d heads, %d sectors%s\n",
1170 private->rdc_data.dev_type, 1192 private->rdc_data.dev_type,
1171 private->rdc_data.dev_model, 1193 private->rdc_data.dev_model,
1172 private->rdc_data.cu_type, 1194 private->rdc_data.cu_type,
1173 private->rdc_data.cu_model.model, 1195 private->rdc_data.cu_model.model,
1174 private->real_cyl, 1196 private->real_cyl,
1175 private->rdc_data.trk_per_cyl, 1197 private->rdc_data.trk_per_cyl,
1176 private->rdc_data.sec_per_trk); 1198 private->rdc_data.sec_per_trk,
1199 readonly ? ", read-only device" : "");
1177 return 0; 1200 return 0;
1178 1201
1179out_err3: 1202out_err3:
@@ -1256,12 +1279,29 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
1256 cqr->block = NULL; 1279 cqr->block = NULL;
1257 cqr->startdev = device; 1280 cqr->startdev = device;
1258 cqr->memdev = device; 1281 cqr->memdev = device;
1259 cqr->retries = 0; 1282 cqr->retries = 255;
1260 cqr->buildclk = get_clock(); 1283 cqr->buildclk = get_clock();
1261 cqr->status = DASD_CQR_FILLED; 1284 cqr->status = DASD_CQR_FILLED;
1262 return cqr; 1285 return cqr;
1263} 1286}
1264 1287
1288/* differentiate between 'no record found' and any other error */
1289static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
1290{
1291 char *sense;
1292 if (init_cqr->status == DASD_CQR_DONE)
1293 return INIT_CQR_OK;
1294 else if (init_cqr->status == DASD_CQR_NEED_ERP ||
1295 init_cqr->status == DASD_CQR_FAILED) {
1296 sense = dasd_get_sense(&init_cqr->irb);
1297 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
1298 return INIT_CQR_UNFORMATTED;
1299 else
1300 return INIT_CQR_ERROR;
1301 } else
1302 return INIT_CQR_ERROR;
1303}
1304
1265/* 1305/*
1266 * This is the callback function for the init_analysis cqr. It saves 1306 * This is the callback function for the init_analysis cqr. It saves
1267 * the status of the initial analysis ccw before it frees it and kicks 1307 * the status of the initial analysis ccw before it frees it and kicks
@@ -1269,21 +1309,20 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
1269 * dasd_eckd_do_analysis again (if the device has not been marked 1309
1270 * for deletion in the meantime). 1310 * for deletion in the meantime).
1271 */ 1311 */
1272static void 1312static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1273dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data) 1313 void *data)
1274{ 1314{
1275 struct dasd_eckd_private *private; 1315 struct dasd_eckd_private *private;
1276 struct dasd_device *device; 1316 struct dasd_device *device;
1277 1317
1278 device = init_cqr->startdev; 1318 device = init_cqr->startdev;
1279 private = (struct dasd_eckd_private *) device->private; 1319 private = (struct dasd_eckd_private *) device->private;
1280 private->init_cqr_status = init_cqr->status; 1320 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
1281 dasd_sfree_request(init_cqr, device); 1321 dasd_sfree_request(init_cqr, device);
1282 dasd_kick_device(device); 1322 dasd_kick_device(device);
1283} 1323}
1284 1324
1285static int 1325static int dasd_eckd_start_analysis(struct dasd_block *block)
1286dasd_eckd_start_analysis(struct dasd_block *block)
1287{ 1326{
1288 struct dasd_eckd_private *private; 1327 struct dasd_eckd_private *private;
1289 struct dasd_ccw_req *init_cqr; 1328 struct dasd_ccw_req *init_cqr;
@@ -1295,27 +1334,44 @@ dasd_eckd_start_analysis(struct dasd_block *block)
1295 init_cqr->callback = dasd_eckd_analysis_callback; 1334 init_cqr->callback = dasd_eckd_analysis_callback;
1296 init_cqr->callback_data = NULL; 1335 init_cqr->callback_data = NULL;
1297 init_cqr->expires = 5*HZ; 1336 init_cqr->expires = 5*HZ;
1337 /* first try without ERP, so we can later handle unformatted
1338 * devices as a special case
1339 */
1340 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
1341 init_cqr->retries = 0;
1298 dasd_add_request_head(init_cqr); 1342 dasd_add_request_head(init_cqr);
1299 return -EAGAIN; 1343 return -EAGAIN;
1300} 1344}
1301 1345
1302static int 1346static int dasd_eckd_end_analysis(struct dasd_block *block)
1303dasd_eckd_end_analysis(struct dasd_block *block)
1304{ 1347{
1305 struct dasd_device *device; 1348 struct dasd_device *device;
1306 struct dasd_eckd_private *private; 1349 struct dasd_eckd_private *private;
1307 struct eckd_count *count_area; 1350 struct eckd_count *count_area;
1308 unsigned int sb, blk_per_trk; 1351 unsigned int sb, blk_per_trk;
1309 int status, i; 1352 int status, i;
1353 struct dasd_ccw_req *init_cqr;
1310 1354
1311 device = block->base; 1355 device = block->base;
1312 private = (struct dasd_eckd_private *) device->private; 1356 private = (struct dasd_eckd_private *) device->private;
1313 status = private->init_cqr_status; 1357 status = private->init_cqr_status;
1314 private->init_cqr_status = -1; 1358 private->init_cqr_status = -1;
1315 if (status != DASD_CQR_DONE) { 1359 if (status == INIT_CQR_ERROR) {
1316 dev_warn(&device->cdev->dev, 1360 /* try again, this time with full ERP */
1317 "The DASD is not formatted\n"); 1361 init_cqr = dasd_eckd_analysis_ccw(device);
1362 dasd_sleep_on(init_cqr);
1363 status = dasd_eckd_analysis_evaluation(init_cqr);
1364 dasd_sfree_request(init_cqr, device);
1365 }
1366
1367 if (status == INIT_CQR_UNFORMATTED) {
1368 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1318 return -EMEDIUMTYPE; 1369 return -EMEDIUMTYPE;
1370 } else if (status == INIT_CQR_ERROR) {
1371 dev_err(&device->cdev->dev,
1372 "Detecting the DASD disk layout failed because "
1373 "of an I/O error\n");
1374 return -EIO;
1319 } 1375 }
1320 1376
1321 private->uses_cdl = 1; 1377 private->uses_cdl = 1;
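Read together, the start/end analysis hunks implement a two-phase probe: the first attempt runs without ERP and without retries, so an unformatted volume fails quickly with a 'no record found' sense, and only a genuine I/O error triggers a second, fully ERP-backed attempt. A condensed sketch of the control flow (written synchronously for brevity; the patch runs the first phase asynchronously through the analysis callback):

	/* Sketch: cheap probe first, full error recovery only on real errors. */
	static int analyse_block(struct dasd_device *device)
	{
		struct dasd_ccw_req *cqr;
		int status;

		cqr = dasd_eckd_analysis_ccw(device);
		clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);	/* phase 1: no ERP */
		cqr->retries = 0;
		dasd_sleep_on(cqr);
		status = dasd_eckd_analysis_evaluation(cqr);
		dasd_sfree_request(cqr, device);

		if (status == INIT_CQR_ERROR) {			/* phase 2: full ERP */
			cqr = dasd_eckd_analysis_ccw(device);
			dasd_sleep_on(cqr);
			status = dasd_eckd_analysis_evaluation(cqr);
			dasd_sfree_request(cqr, device);
		}
		if (status == INIT_CQR_UNFORMATTED)
			return -EMEDIUMTYPE;
		return (status == INIT_CQR_OK) ? 0 : -EIO;
	}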
@@ -1607,8 +1663,7 @@ dasd_eckd_format_device(struct dasd_device * device,
1607 } 1663 }
1608 fcp->startdev = device; 1664 fcp->startdev = device;
1609 fcp->memdev = device; 1665 fcp->memdev = device;
1610 clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags); 1666 fcp->retries = 256;
1611 fcp->retries = 5; /* set retry counter to enable default ERP */
1612 fcp->buildclk = get_clock(); 1667 fcp->buildclk = get_clock();
1613 fcp->status = DASD_CQR_FILLED; 1668 fcp->status = DASD_CQR_FILLED;
1614 return fcp; 1669 return fcp;
@@ -2690,6 +2745,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2690 cqr->startdev = device; 2745 cqr->startdev = device;
2691 cqr->memdev = device; 2746 cqr->memdev = device;
2692 cqr->retries = 0; 2747 cqr->retries = 0;
2748 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2693 cqr->expires = 10 * HZ; 2749 cqr->expires = 10 * HZ;
2694 2750
2695 /* Prepare for Read Subsystem Data */ 2751 /* Prepare for Read Subsystem Data */
@@ -2789,19 +2845,27 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2789 char *psf_data, *rssd_result; 2845 char *psf_data, *rssd_result;
2790 struct dasd_ccw_req *cqr; 2846 struct dasd_ccw_req *cqr;
2791 struct ccw1 *ccw; 2847 struct ccw1 *ccw;
2848 char psf0, psf1;
2792 int rc; 2849 int rc;
2793 2850
2851 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
2852 return -EACCES;
2853 psf0 = psf1 = 0;
2854
2794 /* Copy parms from caller */ 2855 /* Copy parms from caller */
2795 rc = -EFAULT; 2856 rc = -EFAULT;
2796 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 2857 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
2797 goto out; 2858 goto out;
2798#ifndef CONFIG_64BIT 2859 if (is_compat_task() || sizeof(long) == 4) {
2799 /* Make sure pointers are sane even on 31 bit. */ 2860 /* Make sure pointers are sane even on 31 bit. */
2800 if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
2801 rc = -EINVAL; 2861 rc = -EINVAL;
2802 goto out; 2862 if ((usrparm.psf_data >> 32) != 0)
2863 goto out;
2864 if ((usrparm.rssd_result >> 32) != 0)
2865 goto out;
2866 usrparm.psf_data &= 0x7fffffffULL;
2867 usrparm.rssd_result &= 0x7fffffffULL;
2803 } 2868 }
2804#endif
2805 /* alloc I/O data area */ 2869 /* alloc I/O data area */
2806 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 2870 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
2807 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 2871 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
@@ -2816,12 +2880,8 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2816 (void __user *)(unsigned long) usrparm.psf_data, 2880 (void __user *)(unsigned long) usrparm.psf_data,
2817 usrparm.psf_data_len)) 2881 usrparm.psf_data_len))
2818 goto out_free; 2882 goto out_free;
2819 2883 psf0 = psf_data[0];
2820 /* sanity check on syscall header */ 2884 psf1 = psf_data[1];
2821 if (psf_data[0] != 0x17 && psf_data[1] != 0xce) {
2822 rc = -EINVAL;
2823 goto out_free;
2824 }
2825 2885
2826 /* setup CCWs for PSF + RSSD */ 2886 /* setup CCWs for PSF + RSSD */
2827 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device); 2887 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
@@ -2872,7 +2932,9 @@ out_free:
2872 kfree(rssd_result); 2932 kfree(rssd_result);
2873 kfree(psf_data); 2933 kfree(psf_data);
2874out: 2934out:
2875 DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc); 2935 DBF_DEV_EVENT(DBF_WARNING, device,
2936 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
2937 (int) psf0, (int) psf1, rc);
2876 return rc; 2938 return rc;
2877} 2939}
2878 2940
@@ -2980,7 +3042,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
2980 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3042 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2981 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", 3043 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
2982 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3044 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
2983 scsw_cc(&irb->scsw), req->intrc); 3045 scsw_cc(&irb->scsw), req ? req->intrc : 0);
2984 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3046 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2985 " device %s: Failing CCW: %p\n", 3047 " device %s: Failing CCW: %p\n",
2986 dev_name(&device->cdev->dev), 3048 dev_name(&device->cdev->dev),
@@ -3093,11 +3155,11 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3093 3155
3094 tsb = NULL; 3156 tsb = NULL;
3095 sense = NULL; 3157 sense = NULL;
3096 if (irb->scsw.tm.tcw) 3158 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01))
3097 tsb = tcw_get_tsb( 3159 tsb = tcw_get_tsb(
3098 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 3160 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3099 3161
3100 if (tsb && (irb->scsw.tm.fcxs == 0x01)) { 3162 if (tsb) {
3101 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3163 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3102 " tsb->length %d\n", tsb->length); 3164 " tsb->length %d\n", tsb->length);
3103 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3165 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -3240,11 +3302,15 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3240 if (is_known < 0) 3302 if (is_known < 0)
3241 return is_known; 3303 return is_known;
3242 if (!is_known) { 3304 if (!is_known) {
3243 /* new lcu found */ 3305 dasd_eckd_validate_server(device);
3244 rc = dasd_eckd_validate_server(device); /* will switch pav on */ 3306 dasd_alias_lcu_setup_complete(device);
3245 if (rc) 3307 } else
3246 goto out_err; 3308 dasd_alias_wait_for_lcu_setup(device);
3247 } 3309
3310 /* RE-Read Configuration Data */
3311 rc = dasd_eckd_read_conf(device);
3312 if (rc)
3313 goto out_err;
3248 3314
3249 /* Read Feature Codes */ 3315 /* Read Feature Codes */
3250 dasd_eckd_read_features(device); 3316 dasd_eckd_read_features(device);
@@ -3253,9 +3319,8 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3253 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 3319 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3254 &temp_rdc_data, 64); 3320 &temp_rdc_data, 64);
3255 if (rc) { 3321 if (rc) {
3256 DBF_EVENT(DBF_WARNING, 3322 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3257 "Read device characteristics failed, rc=%d for " 3323 "Read device characteristic failed, rc=%d", rc);
3258 "device: %s", rc, dev_name(&device->cdev->dev));
3259 goto out_err; 3324 goto out_err;
3260 } 3325 }
3261 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 3326 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index ad45bcac3ce4..864d53c04201 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -414,6 +414,7 @@ struct alias_lcu {
414 struct summary_unit_check_work_data suc_data; 414 struct summary_unit_check_work_data suc_data;
415 struct read_uac_work_data ruac_data; 415 struct read_uac_work_data ruac_data;
416 struct dasd_ccw_req *rsu_cqr; 416 struct dasd_ccw_req *rsu_cqr;
417 struct completion lcu_setup;
417}; 418};
418 419
419struct alias_pav_group { 420struct alias_pav_group {
@@ -460,5 +461,6 @@ int dasd_alias_remove_device(struct dasd_device *);
460struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); 461struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
461void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); 462void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
462void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); 463void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
463 464void dasd_alias_lcu_setup_complete(struct dasd_device *);
465void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
464#endif /* DASD_ECKD_H */ 466#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index d96039eae59b..dd88803e4899 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -19,6 +19,7 @@
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/smp_lock.h> 20#include <linux/smp_lock.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/slab.h>
22 23
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24#include <asm/atomic.h> 25#include <asm/atomic.h>
@@ -536,7 +537,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
536 eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); 537 eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
537 if (!eerb) 538 if (!eerb)
538 return -ENOMEM; 539 return -ENOMEM;
539 lock_kernel();
540 eerb->buffer_page_count = eer_pages; 540 eerb->buffer_page_count = eer_pages;
541 if (eerb->buffer_page_count < 1 || 541 if (eerb->buffer_page_count < 1 ||
542 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { 542 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
@@ -544,7 +544,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
544 DBF_EVENT(DBF_WARNING, "can't open device since module " 544 DBF_EVENT(DBF_WARNING, "can't open device since module "
545 "parameter eer_pages is smaller than 1 or" 545 "parameter eer_pages is smaller than 1 or"
546 " bigger than %d", (int)(INT_MAX / PAGE_SIZE)); 546 " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
547 unlock_kernel();
548 return -EINVAL; 547 return -EINVAL;
549 } 548 }
550 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; 549 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
@@ -552,14 +551,12 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
552 GFP_KERNEL); 551 GFP_KERNEL);
553 if (!eerb->buffer) { 552 if (!eerb->buffer) {
554 kfree(eerb); 553 kfree(eerb);
555 unlock_kernel();
556 return -ENOMEM; 554 return -ENOMEM;
557 } 555 }
558 if (dasd_eer_allocate_buffer_pages(eerb->buffer, 556 if (dasd_eer_allocate_buffer_pages(eerb->buffer,
559 eerb->buffer_page_count)) { 557 eerb->buffer_page_count)) {
560 kfree(eerb->buffer); 558 kfree(eerb->buffer);
561 kfree(eerb); 559 kfree(eerb);
562 unlock_kernel();
563 return -ENOMEM; 560 return -ENOMEM;
564 } 561 }
565 filp->private_data = eerb; 562 filp->private_data = eerb;
@@ -567,7 +564,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
567 list_add(&eerb->list, &bufferlist); 564 list_add(&eerb->list, &bufferlist);
568 spin_unlock_irqrestore(&bufferlock, flags); 565 spin_unlock_irqrestore(&bufferlock, flags);
569 566
570 unlock_kernel();
571 return nonseekable_open(inp,filp); 567 return nonseekable_open(inp,filp);
572} 568}
573 569
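The lock_kernel()/unlock_kernel() pairs can simply be dropped here because nothing in this open path depends on the BKL: the eerbuffer is private to the caller until the final list_add(), and that publication step is already serialized by the bufferlock spinlock. The resulting shape is the common allocate-unlocked, lock-only-to-publish pattern:

	/* Sketch: build per-open state without locks, take the lock
	 * only to publish it on the shared list.
	 */
	static int example_open(struct inode *inp, struct file *filp)
	{
		struct eerbuffer *eerb;
		unsigned long flags;

		eerb = kzalloc(sizeof(*eerb), GFP_KERNEL);	/* still private */
		if (!eerb)
			return -ENOMEM;
		filp->private_data = eerb;
		spin_lock_irqsave(&bufferlock, flags);		/* publish */
		list_add(&eerb->list, &bufferlist);
		spin_unlock_irqrestore(&bufferlock, flags);
		return nonseekable_open(inp, filp);
	}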
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index f245377e8e27..37282b90eecc 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -20,7 +20,6 @@
20#include <asm/idals.h> 20#include <asm/idals.h>
21#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/todclk.h>
24#include <asm/ccwdev.h> 23#include <asm/ccwdev.h>
25 24
26#include "dasd_int.h" 25#include "dasd_int.h"
@@ -125,6 +124,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
125 struct dasd_fba_private *private; 124 struct dasd_fba_private *private;
126 struct ccw_device *cdev = device->cdev; 125 struct ccw_device *cdev = device->cdev;
127 int rc; 126 int rc;
127 int readonly;
128 128
129 private = (struct dasd_fba_private *) device->private; 129 private = (struct dasd_fba_private *) device->private;
130 if (!private) { 130 if (!private) {
@@ -141,9 +141,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
141 } 141 }
142 block = dasd_alloc_block(); 142 block = dasd_alloc_block();
143 if (IS_ERR(block)) { 143 if (IS_ERR(block)) {
144 DBF_EVENT(DBF_WARNING, "could not allocate dasd block " 144 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
145 "structure for device: %s", 145 "dasd block structure");
146 dev_name(&device->cdev->dev));
147 device->private = NULL; 146 device->private = NULL;
148 kfree(private); 147 kfree(private);
149 return PTR_ERR(block); 148 return PTR_ERR(block);
@@ -155,9 +154,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
155 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC, 154 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
156 &private->rdc_data, 32); 155 &private->rdc_data, 32);
157 if (rc) { 156 if (rc) {
158 DBF_EVENT(DBF_WARNING, "Read device characteristics returned " 157 DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
159 "error %d for device: %s", 158 "characteristics returned error %d", rc);
160 rc, dev_name(&device->cdev->dev));
161 device->block = NULL; 159 device->block = NULL;
162 dasd_free_block(block); 160 dasd_free_block(block);
163 device->private = NULL; 161 device->private = NULL;
@@ -165,16 +163,21 @@ dasd_fba_check_characteristics(struct dasd_device *device)
165 return rc; 163 return rc;
166 } 164 }
167 165
166 readonly = dasd_device_is_ro(device);
167 if (readonly)
168 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
169
168 dev_info(&device->cdev->dev, 170 dev_info(&device->cdev->dev,
169 "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB " 171 "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
170 "and %d B/blk\n", 172 "and %d B/blk%s\n",
171 cdev->id.dev_type, 173 cdev->id.dev_type,
172 cdev->id.dev_model, 174 cdev->id.dev_model,
173 cdev->id.cu_type, 175 cdev->id.cu_type,
174 cdev->id.cu_model, 176 cdev->id.cu_model,
175 ((private->rdc_data.blk_bdsa * 177 ((private->rdc_data.blk_bdsa *
176 (private->rdc_data.blk_size >> 9)) >> 11), 178 (private->rdc_data.blk_size >> 9)) >> 11),
177 private->rdc_data.blk_size); 179 private->rdc_data.blk_size,
180 readonly ? ", read-only device" : "");
178 return 0; 181 return 0;
179} 182}
180 183
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d3198303b93c..30a1ca3d08b7 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -70,7 +70,8 @@ int dasd_gendisk_alloc(struct dasd_block *block)
70 } 70 }
71 len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26)); 71 len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
72 72
73 if (block->base->features & DASD_FEATURE_READONLY) 73 if (base->features & DASD_FEATURE_READONLY ||
74 test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
74 set_disk_ro(gdp, 1); 75 set_disk_ro(gdp, 1);
75 gdp->private_data = block; 76 gdp->private_data = block;
76 gdp->queue = block->request_queue; 77 gdp->queue = block->request_queue;
@@ -88,6 +89,7 @@ void dasd_gendisk_free(struct dasd_block *block)
88 if (block->gdp) { 89 if (block->gdp) {
89 del_gendisk(block->gdp); 90 del_gendisk(block->gdp);
90 block->gdp->queue = NULL; 91 block->gdp->queue = NULL;
92 block->gdp->private_data = NULL;
91 put_disk(block->gdp); 93 put_disk(block->gdp);
92 block->gdp = NULL; 94 block->gdp = NULL;
93 } 95 }
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8afd9fa00875..a91d4a97d4f2 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -108,6 +108,16 @@ do { \
108 d_data); \ 108 d_data); \
109} while(0) 109} while(0)
110 110
111#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
112do { \
113 struct ccw_dev_id __dev_id; \
114 ccw_device_get_id(d_cdev, &__dev_id); \
115 debug_sprintf_event(dasd_debug_area, \
116 d_level, \
117 "0.%x.%04x " d_str "\n", \
118 __dev_id.ssid, __dev_id.devno, d_data); \
119} while (0)
120
111#define DBF_EXC(d_level, d_str, d_data...)\ 121#define DBF_EXC(d_level, d_str, d_data...)\
112do { \ 122do { \
113 debug_sprintf_exception(dasd_debug_area, \ 123 debug_sprintf_exception(dasd_debug_area, \
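The new DBF_EVENT_DEVID macro absorbs the many "for device: %s" suffixes removed from the call sites above: it fetches the bus id itself via ccw_device_get_id() and prefixes every message with "0.<ssid>.<devno>". A converted call site and roughly what it expands to:

	/* Call site, as used in dasd_eckd.c above: */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
			"Read configuration data returned error %d", rc);

	/* Expands to approximately: */
	do {
		struct ccw_dev_id __dev_id;
		ccw_device_get_id(device->cdev, &__dev_id);
		debug_sprintf_event(dasd_debug_area, DBF_WARNING,
				    "0.%x.%04x Read configuration data "
				    "returned error %d\n",
				    __dev_id.ssid, __dev_id.devno, rc);
	} while (0);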
@@ -358,6 +368,7 @@ struct dasd_device {
358 368
359 /* Device state and target state. */ 369 /* Device state and target state. */
360 int state, target; 370 int state, target;
371 struct mutex state_mutex;
361 int stopped; /* device (ccw_device_start) was stopped */ 372 int stopped; /* device (ccw_device_start) was stopped */
362 373
363 /* reference count. */ 374 /* reference count. */
@@ -425,6 +436,10 @@ struct dasd_block {
425#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ 436#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
426#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */ 437#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */
427#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */ 438#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */
439#define DASD_FLAG_DEVICE_RO 6 /* The device itself is read-only. Don't
440 * confuse this with the user specified
441 * read-only feature.
442 */
428 443
429void dasd_put_device_wake(struct dasd_device *); 444void dasd_put_device_wake(struct dasd_device *);
430 445
@@ -595,6 +610,12 @@ int dasd_generic_restore_device(struct ccw_device *);
595int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 610int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
596char *dasd_get_sense(struct irb *); 611char *dasd_get_sense(struct irb *);
597 612
613void dasd_device_set_stop_bits(struct dasd_device *, int);
614void dasd_device_remove_stop_bits(struct dasd_device *, int);
615
616int dasd_device_is_ro(struct dasd_device *);
617
618
598/* externals in dasd_devmap.c */ 619/* externals in dasd_devmap.c */
599extern int dasd_max_devindex; 620extern int dasd_max_devindex;
600extern int dasd_probeonly; 621extern int dasd_probeonly;
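dasd_device_set_stop_bits() and dasd_device_remove_stop_bits(), used by the quiesce/resume ioctl conversion below, replace the former open-coded "stopped |= ..." and "stopped &= ~..." updates with one central accessor pair. Their bodies live in dasd.c and are outside this diff; a plausible minimal form, assuming callers hold get_ccwdev_lock() as the ioctl code below does:

	/* Sketch: central accessors for the per-device stop mask. */
	void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
	{
		device->stopped |= bits;	/* caller holds the ccwdev lock */
	}

	void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
	{
		device->stopped &= ~bits;
	}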
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index f756a1b0c57a..1557214944f7 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -17,7 +17,8 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/blkpg.h> 18#include <linux/blkpg.h>
19#include <linux/smp_lock.h> 19#include <linux/smp_lock.h>
20 20#include <linux/slab.h>
21#include <asm/compat.h>
21#include <asm/ccwdev.h> 22#include <asm/ccwdev.h>
22#include <asm/cmb.h> 23#include <asm/cmb.h>
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
@@ -101,7 +102,7 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
101 pr_info("%s: The DASD has been put in the quiesce " 102 pr_info("%s: The DASD has been put in the quiesce "
102 "state\n", dev_name(&base->cdev->dev)); 103 "state\n", dev_name(&base->cdev->dev));
103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 104 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
104 base->stopped |= DASD_STOPPED_QUIESCE; 105 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 106 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
106 return 0; 107 return 0;
107} 108}
@@ -122,7 +123,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
122 pr_info("%s: I/O operations have been resumed " 123 pr_info("%s: I/O operations have been resumed "
123 "on the DASD\n", dev_name(&base->cdev->dev)); 124 "on the DASD\n", dev_name(&base->cdev->dev));
124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 125 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
125 base->stopped &= ~DASD_STOPPED_QUIESCE; 126 dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 127 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
127 128
128 dasd_schedule_block_bh(block); 129 dasd_schedule_block_bh(block);
@@ -199,7 +200,8 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
199 if (!argp) 200 if (!argp)
200 return -EINVAL; 201 return -EINVAL;
201 202
202 if (block->base->features & DASD_FEATURE_READONLY) 203 if (block->base->features & DASD_FEATURE_READONLY ||
204 test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags))
203 return -EROFS; 205 return -EROFS;
204 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 206 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
205 return -EFAULT; 207 return -EFAULT;
@@ -260,7 +262,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
260 struct ccw_dev_id dev_id; 262 struct ccw_dev_id dev_id;
261 263
262 base = block->base; 264 base = block->base;
263 if (!base->discipline->fill_info) 265 if (!base->discipline || !base->discipline->fill_info)
264 return -EINVAL; 266 return -EINVAL;
265 267
266 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); 268 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
@@ -303,10 +305,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
303 dasd_info->features |= 305 dasd_info->features |=
304 ((base->features & DASD_FEATURE_READONLY) != 0); 306 ((base->features & DASD_FEATURE_READONLY) != 0);
305 307
306 if (base->discipline) 308 memcpy(dasd_info->type, base->discipline->name, 4);
307 memcpy(dasd_info->type, base->discipline->name, 4);
308 else
309 memcpy(dasd_info->type, "none", 4);
310 309
311 if (block->request_queue->request_fn) { 310 if (block->request_queue->request_fn) {
312 struct list_head *l; 311 struct list_head *l;
@@ -352,15 +351,15 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
352 return -EINVAL; 351 return -EINVAL;
353 if (get_user(intval, (int __user *)argp)) 352 if (get_user(intval, (int __user *)argp))
354 return -EFAULT; 353 return -EFAULT;
355 354 if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags))
355 return -EROFS;
356 set_disk_ro(bdev->bd_disk, intval); 356 set_disk_ro(bdev->bd_disk, intval);
357 return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval); 357 return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval);
358} 358}
359 359
360static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, 360static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
361 unsigned long arg) 361 struct cmbdata __user *argp)
362{ 362{
363 struct cmbdata __user *argp = (void __user *) arg;
364 size_t size = _IOC_SIZE(cmd); 363 size_t size = _IOC_SIZE(cmd);
365 struct cmbdata data; 364 struct cmbdata data;
366 int ret; 365 int ret;
@@ -376,7 +375,12 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
376 unsigned int cmd, unsigned long arg) 375 unsigned int cmd, unsigned long arg)
377{ 376{
378 struct dasd_block *block = bdev->bd_disk->private_data; 377 struct dasd_block *block = bdev->bd_disk->private_data;
379 void __user *argp = (void __user *)arg; 378 void __user *argp;
379
380 if (is_compat_task())
381 argp = compat_ptr(arg);
382 else
383 argp = (void __user *)arg;
380 384
381 if (!block) 385 if (!block)
382 return -ENODEV; 386 return -ENODEV;
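Choosing the user pointer per call is the standard shape for a handler wired up as both .unlocked_ioctl and .compat_ioctl: compat_ptr() turns the 32-bit argument of a 31-bit caller into a proper __user pointer, while native callers keep a plain cast. Schematically:

	/* Sketch: one ioctl entry point for native and compat callers. */
	static long example_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
	{
		void __user *argp;

		if (is_compat_task())			/* 31-bit caller? */
			argp = compat_ptr(arg);
		else
			argp = (void __user *)arg;
		/* ... dispatch on cmd, passing argp to the handlers ... */
		return -ENOTTY;
	}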
@@ -414,7 +418,7 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
414 case BIODASDCMFDISABLE: 418 case BIODASDCMFDISABLE:
415 return disable_cmf(block->base->cdev); 419 return disable_cmf(block->base->cdev);
416 case BIODASDREADALLCMB: 420 case BIODASDREADALLCMB:
417 return dasd_ioctl_readall_cmb(block, cmd, arg); 421 return dasd_ioctl_readall_cmb(block, cmd, argp);
418 default: 422 default:
419 /* if the discipline has an ioctl method try it. */ 423 /* if the discipline has an ioctl method try it. */
420 if (block->base->discipline->ioctl) { 424 if (block->base->discipline->ioctl) {
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 654daa3cdfda..2eb025592809 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -14,6 +14,8 @@
14#define KMSG_COMPONENT "dasd" 14#define KMSG_COMPONENT "dasd"
15 15
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/slab.h>
18#include <linux/string.h>
17#include <linux/seq_file.h> 19#include <linux/seq_file.h>
18#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
19#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
@@ -71,7 +73,7 @@ dasd_devices_show(struct seq_file *m, void *v)
71 /* Print device number. */ 73 /* Print device number. */
72 seq_printf(m, "%s", dev_name(&device->cdev->dev)); 74 seq_printf(m, "%s", dev_name(&device->cdev->dev));
73 /* Print discipline string. */ 75 /* Print discipline string. */
74 if (device != NULL && device->discipline != NULL) 76 if (device->discipline != NULL)
75 seq_printf(m, "(%s)", device->discipline->name); 77 seq_printf(m, "(%s)", device->discipline->name);
76 else 78 else
77 seq_printf(m, "(none)"); 79 seq_printf(m, "(none)");
@@ -91,10 +93,7 @@ dasd_devices_show(struct seq_file *m, void *v)
91 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; 93 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
92 seq_printf(m, "%4s: ", substr); 94 seq_printf(m, "%4s: ", substr);
93 /* Print device status information. */ 95 /* Print device status information. */
94 switch ((device != NULL) ? device->state : -1) { 96 switch (device->state) {
95 case -1:
96 seq_printf(m, "unknown");
97 break;
98 case DASD_STATE_NEW: 97 case DASD_STATE_NEW:
99 seq_printf(m, "new"); 98 seq_printf(m, "new");
100 break; 99 break;
@@ -167,99 +166,82 @@ static const struct file_operations dasd_devices_file_ops = {
167 .release = seq_release, 166 .release = seq_release,
168}; 167};
169 168
170static int
171dasd_calc_metrics(char *page, char **start, off_t off,
172 int count, int *eof, int len)
173{
174 len = (len > off) ? len - off : 0;
175 if (len > count)
176 len = count;
177 if (len < count)
178 *eof = 1;
179 *start = page + off;
180 return len;
181}
182
183#ifdef CONFIG_DASD_PROFILE 169#ifdef CONFIG_DASD_PROFILE
184static char * 170static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
185dasd_statistics_array(char *str, unsigned int *array, int factor)
186{ 171{
187 int i; 172 int i;
188 173
189 for (i = 0; i < 32; i++) { 174 for (i = 0; i < 32; i++) {
190 str += sprintf(str, "%7d ", array[i] / factor); 175 seq_printf(m, "%7d ", array[i] / factor);
191 if (i == 15) 176 if (i == 15)
192 str += sprintf(str, "\n"); 177 seq_putc(m, '\n');
193 } 178 }
194 str += sprintf(str,"\n"); 179 seq_putc(m, '\n');
195 return str;
196} 180}
197#endif /* CONFIG_DASD_PROFILE */ 181#endif /* CONFIG_DASD_PROFILE */
198 182
199static int 183static int dasd_stats_proc_show(struct seq_file *m, void *v)
200dasd_statistics_read(char *page, char **start, off_t off,
201 int count, int *eof, void *data)
202{ 184{
203 unsigned long len;
204#ifdef CONFIG_DASD_PROFILE 185#ifdef CONFIG_DASD_PROFILE
205 struct dasd_profile_info_t *prof; 186 struct dasd_profile_info_t *prof;
206 char *str;
207 int factor; 187 int factor;
208 188
209 /* check for active profiling */ 189 /* check for active profiling */
210 if (dasd_profile_level == DASD_PROFILE_OFF) { 190 if (dasd_profile_level == DASD_PROFILE_OFF) {
211 len = sprintf(page, "Statistics are off - they might be " 191 seq_printf(m, "Statistics are off - they might be "
212 "switched on using 'echo set on > " 192 "switched on using 'echo set on > "
213 "/proc/dasd/statistics'\n"); 193 "/proc/dasd/statistics'\n");
214 return dasd_calc_metrics(page, start, off, count, eof, len); 194 return 0;
215 } 195 }
216 196
217 prof = &dasd_global_profile; 197 prof = &dasd_global_profile;
218 /* prevent couter 'overflow' on output */ 198 /* prevent counter 'overflow' on output */
219 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; 199 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
220 factor *= 10); 200 factor *= 10);
221 201
222 str = page; 202 seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs);
223 str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); 203 seq_printf(m, "with %u sectors(512B each)\n",
224 str += sprintf(str, "with %u sectors(512B each)\n",
225 prof->dasd_io_sects); 204 prof->dasd_io_sects);
226 str += sprintf(str, "Scale Factor is %d\n", factor); 205 seq_printf(m, "Scale Factor is %d\n", factor);
227 str += sprintf(str, 206 seq_printf(m,
228 " __<4 ___8 __16 __32 __64 _128 " 207 " __<4 ___8 __16 __32 __64 _128 "
229 " _256 _512 __1k __2k __4k __8k " 208 " _256 _512 __1k __2k __4k __8k "
230 " _16k _32k _64k 128k\n"); 209 " _16k _32k _64k 128k\n");
231 str += sprintf(str, 210 seq_printf(m,
232 " _256 _512 __1M __2M __4M __8M " 211 " _256 _512 __1M __2M __4M __8M "
233 " _16M _32M _64M 128M 256M 512M " 212 " _16M _32M _64M 128M 256M 512M "
234 " __1G __2G __4G " " _>4G\n"); 213 " __1G __2G __4G " " _>4G\n");
235 214
236 str += sprintf(str, "Histogram of sizes (512B secs)\n"); 215 seq_printf(m, "Histogram of sizes (512B secs)\n");
237 str = dasd_statistics_array(str, prof->dasd_io_secs, factor); 216 dasd_statistics_array(m, prof->dasd_io_secs, factor);
238 str += sprintf(str, "Histogram of I/O times (microseconds)\n"); 217 seq_printf(m, "Histogram of I/O times (microseconds)\n");
239 str = dasd_statistics_array(str, prof->dasd_io_times, factor); 218 dasd_statistics_array(m, prof->dasd_io_times, factor);
240 str += sprintf(str, "Histogram of I/O times per sector\n"); 219 seq_printf(m, "Histogram of I/O times per sector\n");
241 str = dasd_statistics_array(str, prof->dasd_io_timps, factor); 220 dasd_statistics_array(m, prof->dasd_io_timps, factor);
242 str += sprintf(str, "Histogram of I/O time till ssch\n"); 221 seq_printf(m, "Histogram of I/O time till ssch\n");
243 str = dasd_statistics_array(str, prof->dasd_io_time1, factor); 222 dasd_statistics_array(m, prof->dasd_io_time1, factor);
244 str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); 223 seq_printf(m, "Histogram of I/O time between ssch and irq\n");
245 str = dasd_statistics_array(str, prof->dasd_io_time2, factor); 224 dasd_statistics_array(m, prof->dasd_io_time2, factor);
246 str += sprintf(str, "Histogram of I/O time between ssch " 225 seq_printf(m, "Histogram of I/O time between ssch "
247 "and irq per sector\n"); 226 "and irq per sector\n");
248 str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); 227 dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
249 str += sprintf(str, "Histogram of I/O time between irq and end\n"); 228 seq_printf(m, "Histogram of I/O time between irq and end\n");
250 str = dasd_statistics_array(str, prof->dasd_io_time3, factor); 229 dasd_statistics_array(m, prof->dasd_io_time3, factor);
251 str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); 230 seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
252 str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); 231 dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
253 len = str - page;
254#else 232#else
255 len = sprintf(page, "Statistics are not activated in this kernel\n"); 233 seq_printf(m, "Statistics are not activated in this kernel\n");
256#endif 234#endif
257 return dasd_calc_metrics(page, start, off, count, eof, len); 235 return 0;
258} 236}
259 237
260static int 238static int dasd_stats_proc_open(struct inode *inode, struct file *file)
261dasd_statistics_write(struct file *file, const char __user *user_buf, 239{
262 unsigned long user_len, void *data) 240 return single_open(file, dasd_stats_proc_show, NULL);
241}
242
243static ssize_t dasd_stats_proc_write(struct file *file,
244 const char __user *user_buf, size_t user_len, loff_t *pos)
263{ 245{
264#ifdef CONFIG_DASD_PROFILE 246#ifdef CONFIG_DASD_PROFILE
265 char *buffer, *str; 247 char *buffer, *str;
@@ -272,10 +254,10 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
272 DBF_EVENT(DBF_DEBUG, "/proc/dasd/statistics: '%s'\n", buffer); 254
273 255
274 /* check for valid verbs */ 256 /* check for valid verbs */
275 for (str = buffer; isspace(*str); str++); 257 str = skip_spaces(buffer);
276 if (strncmp(str, "set", 3) == 0 && isspace(str[3])) { 258 if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
277 /* 'set xxx' was given */ 259 /* 'set xxx' was given */
278 for (str = str + 4; isspace(*str); str++); 260 str = skip_spaces(str + 4);
279 if (strcmp(str, "on") == 0) { 261 if (strcmp(str, "on") == 0) {
280 /* switch on statistics profiling */ 262 /* switch on statistics profiling */
281 dasd_profile_level = DASD_PROFILE_ON; 263 dasd_profile_level = DASD_PROFILE_ON;
@@ -310,6 +292,15 @@ out_error:
310#endif /* CONFIG_DASD_PROFILE */ 292#endif /* CONFIG_DASD_PROFILE */
311} 293}
312 294
295static const struct file_operations dasd_stats_proc_fops = {
296 .owner = THIS_MODULE,
297 .open = dasd_stats_proc_open,
298 .read = seq_read,
299 .llseek = seq_lseek,
300 .release = single_release,
301 .write = dasd_stats_proc_write,
302};
303
313/* 304/*
314 * Create dasd proc-fs entries. 305 * Create dasd proc-fs entries.
315 * In case creation failed, cleanup and return -ENOENT. 306 * In case creation failed, cleanup and return -ENOENT.
@@ -326,13 +317,12 @@ dasd_proc_init(void)
326 &dasd_devices_file_ops); 317 &dasd_devices_file_ops);
327 if (!dasd_devices_entry) 318 if (!dasd_devices_entry)
328 goto out_nodevices; 319 goto out_nodevices;
329 dasd_statistics_entry = create_proc_entry("statistics", 320 dasd_statistics_entry = proc_create("statistics",
330 S_IFREG | S_IRUGO | S_IWUSR, 321 S_IFREG | S_IRUGO | S_IWUSR,
331 dasd_proc_root_entry); 322 dasd_proc_root_entry,
323 &dasd_stats_proc_fops);
332 if (!dasd_statistics_entry) 324 if (!dasd_statistics_entry)
333 goto out_nostatistics; 325 goto out_nostatistics;
334 dasd_statistics_entry->read_proc = dasd_statistics_read;
335 dasd_statistics_entry->write_proc = dasd_statistics_write;
336 return 0; 326 return 0;
337 327
338 out_nostatistics: 328 out_nostatistics:
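The statistics file thus moves from the legacy read_proc/write_proc interface, with its hand-rolled offset arithmetic in dasd_calc_metrics(), to the seq_file single_open() pattern registered through proc_create(). The whole pattern in miniature (names are illustrative; the write handler is a stub standing in for dasd_stats_proc_write):

	/* Sketch: a read/write proc file built on single_open(). */
	static int demo_proc_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "hello\n");	/* emit the whole file */
		return 0;			/* seq_file handles paging */
	}

	static int demo_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_proc_show, NULL);
	}

	static ssize_t demo_proc_write(struct file *file,
			const char __user *buf, size_t count, loff_t *pos)
	{
		return count;			/* accept and ignore input */
	}

	static const struct file_operations demo_proc_fops = {
		.owner	 = THIS_MODULE,
		.open	 = demo_proc_open,
		.read	 = seq_read,		/* generic seq_file helpers */
		.llseek	 = seq_lseek,
		.release = single_release,
		.write	 = demo_proc_write,
	};

	/* registered with:
	 * proc_create("demo", S_IFREG | S_IRUGO | S_IWUSR, parent,
	 *	       &demo_proc_fops);
	 */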
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index f76f4bd82b9f..9b43ae94beba 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -1005,7 +1005,7 @@ static int dcssblk_thaw(struct device *dev)
1005 return 0; 1005 return 0;
1006} 1006}
1007 1007
1008static struct dev_pm_ops dcssblk_pm_ops = { 1008static const struct dev_pm_ops dcssblk_pm_ops = {
1009 .freeze = dcssblk_freeze, 1009 .freeze = dcssblk_freeze,
1010 .thaw = dcssblk_thaw, 1010 .thaw = dcssblk_thaw,
1011 .restore = dcssblk_restore, 1011 .restore = dcssblk_restore,
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 116d1b3eeb15..c881a14fa5dd 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -33,7 +33,6 @@
33#include <linux/ctype.h> /* isdigit, isxdigit */ 33#include <linux/ctype.h> /* isdigit, isxdigit */
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/slab.h>
37#include <linux/blkdev.h> 36#include <linux/blkdev.h>
38#include <linux/blkpg.h> 37#include <linux/blkpg.h>
39#include <linux/hdreg.h> /* HDIO_GETGEO */ 38#include <linux/hdreg.h> /* HDIO_GETGEO */
@@ -41,6 +40,7 @@
41#include <linux/bio.h> 40#include <linux/bio.h>
42#include <linux/suspend.h> 41#include <linux/suspend.h>
43#include <linux/platform_device.h> 42#include <linux/platform_device.h>
43#include <linux/gfp.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
45 45
46#define XPRAM_NAME "xpram" 46#define XPRAM_NAME "xpram"
@@ -407,7 +407,7 @@ static int xpram_restore(struct device *dev)
407 return 0; 407 return 0;
408} 408}
409 409
410static struct dev_pm_ops xpram_pm_ops = { 410static const struct dev_pm_ops xpram_pm_ops = {
411 .restore = xpram_restore, 411 .restore = xpram_restore,
412}; 412};
413 413
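Both the dcssblk and xpram hunks const-qualify their dev_pm_ops tables. The callbacks are fixed at compile time, so making the structure const lets it live in read-only data; the general shape is:

	/* Sketch: an ops table that never changes belongs in .rodata. */
	static int example_restore(struct device *dev)
	{
		return 0;	/* re-initialize the hardware after hibernation */
	}

	static const struct dev_pm_ops example_pm_ops = {
		.restore = example_restore,
	};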
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 21639d6c996f..59ec073724bf 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -857,7 +857,6 @@ static struct console con3215 = {
857 857
858/* 858/*
859 * 3215 console initialization code called from console_init(). 859 * 3215 console initialization code called from console_init().
860 * NOTE: This is called before kmalloc is available.
861 */ 860 */
862static int __init con3215_init(void) 861static int __init con3215_init(void)
863{ 862{
@@ -1038,22 +1037,6 @@ static void tty3215_flush_buffer(struct tty_struct *tty)
1038} 1037}
1039 1038
1040/* 1039/*
1041 * Currently we don't have any io controls for 3215 ttys
1042 */
1043static int tty3215_ioctl(struct tty_struct *tty, struct file * file,
1044 unsigned int cmd, unsigned long arg)
1045{
1046 if (tty->flags & (1 << TTY_IO_ERROR))
1047 return -EIO;
1048
1049 switch (cmd) {
1050 default:
1051 return -ENOIOCTLCMD;
1052 }
1053 return 0;
1054}
1055
1056/*
1057 * Disable reading from a 3215 tty 1040 * Disable reading from a 3215 tty
1058 */ 1041 */
1059static void tty3215_throttle(struct tty_struct * tty) 1042static void tty3215_throttle(struct tty_struct * tty)
@@ -1118,7 +1101,6 @@ static const struct tty_operations tty3215_ops = {
1118 .write_room = tty3215_write_room, 1101 .write_room = tty3215_write_room,
1119 .chars_in_buffer = tty3215_chars_in_buffer, 1102 .chars_in_buffer = tty3215_chars_in_buffer,
1120 .flush_buffer = tty3215_flush_buffer, 1103 .flush_buffer = tty3215_flush_buffer,
1121 .ioctl = tty3215_ioctl,
1122 .throttle = tty3215_throttle, 1104 .throttle = tty3215_throttle,
1123 .unthrottle = tty3215_unthrottle, 1105 .unthrottle = tty3215_unthrottle,
1124 .stop = tty3215_stop, 1106 .stop = tty3215_stop,
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index bb838bdf829d..bb07577e8fd4 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -12,6 +12,7 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/slab.h>
15#include <linux/err.h> 16#include <linux/err.h>
16#include <linux/reboot.h> 17#include <linux/reboot.h>
17 18
@@ -572,7 +573,6 @@ static struct console con3270 = {
572 573
573/* 574/*
574 * 3270 console initialization code called from console_init(). 575 * 3270 console initialization code called from console_init().
575 * NOTE: This is called before kmalloc is available.
576 */ 576 */
577static int __init 577static int __init
578con3270_init(void) 578con3270_init(void)
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 097d3846a828..0eabcca3c92d 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -12,9 +12,11 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/smp_lock.h>
 
+#include <asm/compat.h>
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
 #include <asm/ebcdic.h>
@@ -38,6 +40,8 @@ struct fs3270 {
 	size_t rdbuf_size;	/* size of data returned by RDBUF */
 };
 
+static DEFINE_MUTEX(fs3270_mutex);
+
 static void
 fs3270_wake_up(struct raw3270_request *rq, void *data)
 {
@@ -74,7 +78,7 @@ fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
 		}
 		rc = raw3270_start(view, rq);
 		if (rc == 0) {
-			/* Started sucessfully. Now wait for completion. */
+			/* Started successfully. Now wait for completion. */
 			wait_event(fp->wait, raw3270_request_final(rq));
 		}
 	} while (rc == -EACCES);
@@ -320,6 +324,7 @@ fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *o
 static long
 fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
+	char __user *argp;
 	struct fs3270 *fp;
 	struct raw3270_iocb iocb;
 	int rc;
@@ -327,8 +332,12 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	fp = filp->private_data;
 	if (!fp)
 		return -ENODEV;
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (char __user *)arg;
 	rc = 0;
-	lock_kernel();
+	mutex_lock(&fs3270_mutex);
 	switch (cmd) {
 	case TUBICMD:
 		fp->read_command = arg;
@@ -337,10 +346,10 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		fp->write_command = arg;
 		break;
 	case TUBGETI:
-		rc = put_user(fp->read_command, (char __user *) arg);
+		rc = put_user(fp->read_command, argp);
 		break;
 	case TUBGETO:
-		rc = put_user(fp->write_command,(char __user *) arg);
+		rc = put_user(fp->write_command, argp);
 		break;
 	case TUBGETMOD:
 		iocb.model = fp->view.model;
@@ -349,12 +358,11 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		iocb.pf_cnt = 24;
 		iocb.re_cnt = 20;
 		iocb.map = 0;
-		if (copy_to_user((char __user *) arg, &iocb,
-				 sizeof(struct raw3270_iocb)))
+		if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
 			rc = -EFAULT;
 		break;
 	}
-	unlock_kernel();
+	mutex_unlock(&fs3270_mutex);
 	return rc;
 }
 
@@ -437,7 +445,7 @@ fs3270_open(struct inode *inode, struct file *filp)
 		minor = tty->index + RAW3270_FIRSTMINOR;
 		tty_kref_put(tty);
 	}
-	lock_kernel();
+	mutex_lock(&fs3270_mutex);
 	/* Check if some other program is already using fullscreen mode. */
 	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
 	if (!IS_ERR(fp)) {
@@ -465,7 +473,7 @@ fs3270_open(struct inode *inode, struct file *filp)
 		if (IS_ERR(ib)) {
 			raw3270_put_view(&fp->view);
 			raw3270_del_view(&fp->view);
-			rc = PTR_ERR(fp);
+			rc = PTR_ERR(ib);
 			goto out;
 		}
 		fp->rdbuf = ib;
@@ -478,7 +486,7 @@ fs3270_open(struct inode *inode, struct file *filp)
 	}
 	filp->private_data = fp;
 out:
-	unlock_kernel();
+	mutex_unlock(&fs3270_mutex);
 	return rc;
 }
 
@@ -509,8 +517,8 @@ static const struct file_operations fs3270_fops = {
 	.write		 = fs3270_write,	/* write */
 	.unlocked_ioctl	 = fs3270_ioctl,	/* ioctl */
 	.compat_ioctl	 = fs3270_ioctl,	/* ioctl */
 	.open		 = fs3270_open,		/* open */
 	.release	 = fs3270_close,	/* release */
 };
 
 /*
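
The fs3270 conversion above shows the pattern used throughout this series for 31-bit compat support: one handler is wired up as both .unlocked_ioctl and .compat_ioctl, and is_compat_task() decides whether the user-space pointer must be translated with compat_ptr(). A minimal sketch of that pattern follows; the foodev name, FOOGET command and foodev_fops are hypothetical, only the control flow mirrors the patch.

	/* sketch: shared native/compat ioctl handler (foodev is made up) */
	static long foodev_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
	{
		int __user *argp;

		if (is_compat_task())		/* 31-bit task on a 64-bit kernel */
			argp = compat_ptr(arg);	/* translate the pointer */
		else
			argp = (int __user *) arg;
		switch (cmd) {
		case FOOGET:			/* hypothetical command */
			return put_user(42, argp);
		}
		return -ENOIOCTLCMD;
	}

	static const struct file_operations foodev_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= foodev_ioctl,
		.compat_ioctl	= foodev_ioctl,	/* same handler is safe here */
	};
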
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index cee4d4e42429..cb6bffe7141a 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -9,6 +9,7 @@
 
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/sysrq.h>
 
 #include <linux/consolemap.h>
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 66e21dd23154..2ed3f82e5c30 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -22,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/poll.h>
 #include <linux/device.h>
+#include <linux/slab.h>
 #include <net/iucv/iucv.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
@@ -283,7 +283,6 @@ static int mon_open(struct inode *inode, struct file *filp)
 	/*
 	 * only one user allowed
 	 */
-	lock_kernel();
 	rc = -EBUSY;
 	if (test_and_set_bit(MON_IN_USE, &mon_in_use))
 		goto out;
@@ -321,7 +320,6 @@ static int mon_open(struct inode *inode, struct file *filp)
 	}
 	filp->private_data = monpriv;
 	dev_set_drvdata(monreader_device, monpriv);
-	unlock_kernel();
 	return nonseekable_open(inode, filp);
 
 out_path:
@@ -331,7 +329,6 @@ out_priv:
 out_use:
 	clear_bit(MON_IN_USE, &mon_in_use);
 out:
-	unlock_kernel();
 	return rc;
 }
 
@@ -533,7 +530,7 @@ static int monreader_restore(struct device *dev)
 	return monreader_thaw(dev);
 }
 
-static struct dev_pm_ops monreader_pm_ops = {
+static const struct dev_pm_ops monreader_pm_ops = {
 	.freeze  = monreader_freeze,
 	.thaw	 = monreader_thaw,
 	.restore = monreader_restore,
@@ -607,6 +604,10 @@ static int __init mon_init(void)
 	}
 	dcss_mkname(mon_dcss_name, &user_data_connect[8]);
 
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
 	rc = misc_register(&mon_dev);
 	if (rc < 0 )
 		goto out;
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 66fb8eba93f4..98a49dfda1de 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -13,7 +13,6 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/errno.h>
-#include <linux/smp_lock.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -21,6 +20,7 @@
 #include <linux/poll.h>
 #include <linux/mutex.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
@@ -185,13 +185,11 @@ static int monwrite_open(struct inode *inode, struct file *filp)
 	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
 	if (!monpriv)
 		return -ENOMEM;
-	lock_kernel();
 	INIT_LIST_HEAD(&monpriv->list);
 	monpriv->hdr_to_read = sizeof(monpriv->hdr);
 	mutex_init(&monpriv->thread_mutex);
 	filp->private_data = monpriv;
 	list_add_tail(&monpriv->priv_list, &mon_priv_list);
-	unlock_kernel();
 	return nonseekable_open(inode, filp);
 }
 
@@ -326,7 +324,7 @@ static int monwriter_thaw(struct device *dev)
 	return monwriter_restore(dev);
 }
 
-static struct dev_pm_ops monwriter_pm_ops = {
+static const struct dev_pm_ops monwriter_pm_ops = {
 	.freeze  = monwriter_freeze,
 	.thaw	 = monwriter_thaw,
 	.restore = monwriter_restore,
@@ -364,6 +362,10 @@ static int __init mon_init(void)
 		goto out_driver;
 	}
 
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
 	rc = misc_register(&mon_dev);
 	if (rc)
 		goto out_device;
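
The comment added to both monitor drivers encodes an ordering rule worth spelling out: misc_register() publishes the device node, so user space may invoke the file operations before module_init() even finishes. Everything those operations rely on must therefore be initialized first. A sketch of the constraint, with hypothetical foo_* helpers:

	static int __init foo_init(void)
	{
		int rc;

		rc = foo_setup_state();		/* buffers, locks, lists ... */
		if (rc)
			return rc;
		/*
		 * misc_register() has to be the last action in module_init(),
		 * because file operations will be available right after this.
		 */
		rc = misc_register(&foo_dev);
		if (rc)
			foo_cleanup_state();
		return rc;
	}
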
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 62ddf5202b79..2a4c566456e7 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -373,7 +373,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
 					  (unsigned long) rq, 0, 0);
 		if (rq->rc == 0)
-			return; /* Sucessfully restarted. */
+			return; /* Successfully restarted. */
 		break;
 	case RAW3270_IO_STOP:
 		if (!rq)
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a983f5086788..f6d72e1f2a38 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -196,7 +196,7 @@ __sclp_start_request(struct sclp_req *req)
 	req->start_count++;
 
 	if (rc == 0) {
-		/* Sucessfully started request */
+		/* Successfully started request */
 		req->status = SCLP_REQ_RUNNING;
 		sclp_running_state = sclp_running_state_running;
 		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
@@ -1019,7 +1019,7 @@ static int sclp_restore(struct device *dev)
 	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
 }
 
-static struct dev_pm_ops sclp_pm_ops = {
+static const struct dev_pm_ops sclp_pm_ops = {
 	.freeze		= sclp_freeze,
 	.thaw		= sclp_thaw,
 	.restore	= sclp_restore,
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index b44462a6c6d3..7ad30e72f868 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -11,6 +11,7 @@
 #include <linux/device.h>
 #include <linux/stat.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/kmod.h>
 #include <linux/err.h>
@@ -84,6 +85,7 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write,
 	rc = copy_from_user(buf, buffer, sizeof(buf));
 	if (rc != 0)
 		return -EFAULT;
+	buf[sizeof(buf) - 1] = '\0';
 	if (strict_strtoul(buf, 0, &val) != 0)
 		return -EINVAL;
 	if (val != 0 && val != 1)
@@ -101,18 +103,17 @@ static struct ctl_table callhome_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_handler_callhome,
 	},
-	{ .ctl_name = 0 }
+	{}
 };
 
 static struct ctl_table kern_dir_table[] = {
 	{
-		.ctl_name	= CTL_KERN,
 		.procname	= "kernel",
 		.maxlen		= 0,
 		.mode		= 0555,
 		.child		= callhome_table,
 	},
-	{ .ctl_name = 0 }
+	{}
 };
 
 /*
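
Two independent fixes meet in the sclp_async.c hunks: the sysctl buffer copied from user space is now explicitly NUL-terminated before strict_strtoul() parses it, and the ctl_table entries drop the removed binary-sysctl .ctl_name field, each table ending with an empty terminator entry. Roughly, for a hypothetical "foo" sysctl:

	static int proc_handler_foo(struct ctl_table *ctl, int write,
				    void __user *buffer, size_t *count,
				    loff_t *ppos)
	{
		char buf[4];

		if (copy_from_user(buf, buffer, sizeof(buf)))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';	/* user data may lack a NUL */
		/* ... parse buf ... */
		return 0;
	}

	static struct ctl_table foo_table[] = {
		{
			.procname	= "foo",	/* no .ctl_name anymore */
			.mode		= 0644,
			.proc_handler	= proc_handler_foo,
		},
		{}	/* empty entry terminates the table */
	};
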
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 5cc11c636d38..4b60ede07f0e 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -84,6 +84,7 @@ static void __init sclp_read_info_early(void)
 	do {
 		memset(sccb, 0, sizeof(*sccb));
 		sccb->header.length = sizeof(*sccb);
+		sccb->header.function_code = 0x80;
 		sccb->header.control_mask[2] = 0x80;
 		rc = sclp_cmd_sync_early(commands[i], sccb);
 	} while (rc == -EBUSY);
@@ -307,6 +308,13 @@ struct assign_storage_sccb {
 	u16 rn;
 } __packed;
 
+int arch_get_memory_phys_device(unsigned long start_pfn)
+{
+	if (!rzm)
+		return 0;
+	return PFN_PHYS(start_pfn) >> ilog2(rzm);
+}
+
 static unsigned long long rn2addr(u16 rn)
 {
 	return (unsigned long long) (rn - 1) * rzm;
@@ -546,7 +554,7 @@ struct read_storage_sccb {
 	u32 entries[0];
 } __packed;
 
-static struct dev_pm_ops sclp_mem_pm_ops = {
+static const struct dev_pm_ops sclp_mem_pm_ops = {
 	.freeze		= sclp_mem_freeze,
 };
 
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index ad698d30cb3b..ecf45c54f8c4 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -14,6 +14,7 @@
 #include <linux/termios.h>
 #include <linux/err.h>
 #include <linux/reboot.h>
+#include <linux/gfp.h>
 
 #include "sclp.h"
 #include "sclp_rw.h"
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 434ba04b1309..8258d590505f 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -13,10 +13,10 @@
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
-#include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/gfp.h>
 #include <asm/uaccess.h>
 
 #include "ctrlchar.h"
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index b9d2a007e93b..5d706e6c946f 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>
 
 #include <asm/uaccess.h>
 #include "sclp.h"
@@ -495,6 +496,10 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp)
 		if (tty->driver_data == NULL)
 			return -ENOMEM;
 		tty->low_latency = 0;
+		if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
+			tty->winsize.ws_row = 24;
+			tty->winsize.ws_col = 80;
+		}
 	}
 	return 0;
 }
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index a26333774701..7a242f073632 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -212,6 +212,9 @@ struct tape_device {
 	struct tape_class_device *	nt;
 	struct tape_class_device *	rt;
 
+	/* Device mutex to serialize tape commands. */
+	struct mutex			mutex;
+
 	/* Device discipline information. */
 	struct tape_discipline *	discipline;
 	void *				discdata;
@@ -292,9 +295,9 @@ extern int tape_generic_pm_suspend(struct ccw_device *);
 extern int tape_generic_probe(struct ccw_device *);
 extern void tape_generic_remove(struct ccw_device *);
 
-extern struct tape_device *tape_get_device(int devindex);
-extern struct tape_device *tape_get_device_reference(struct tape_device *);
-extern struct tape_device *tape_put_device(struct tape_device *);
+extern struct tape_device *tape_find_device(int devindex);
+extern struct tape_device *tape_get_device(struct tape_device *);
+extern void tape_put_device(struct tape_device *);
 
 /* Externals from tape_char.c */
 extern int tapechar_init(void);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 2fe45ff77b75..c17f35b6136a 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -9,11 +9,13 @@
  */
 
 #define KMSG_COMPONENT "tape_34xx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/workqueue.h>
+#include <linux/slab.h>
 
 #define TAPE_DBF_AREA	tape_34xx_dbf
 
@@ -113,16 +115,16 @@ tape_34xx_work_handler(struct work_struct *work)
 {
 	struct tape_34xx_work *p =
 		container_of(work, struct tape_34xx_work, work);
+	struct tape_device *device = p->device;
 
 	switch(p->op) {
 		case TO_MSEN:
-			tape_34xx_medium_sense(p->device);
+			tape_34xx_medium_sense(device);
 			break;
 		default:
 			DBF_EVENT(3, "T34XX: internal error: unknown work\n");
 	}
-
-	p->device = tape_put_device(p->device);
+	tape_put_device(device);
 	kfree(p);
 }
 
@@ -136,7 +138,7 @@ tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
 
 	INIT_WORK(&p->work, tape_34xx_work_handler);
 
-	p->device = tape_get_device_reference(device);
+	p->device = tape_get_device(device);
 	p->op = op;
 
 	schedule_work(&p->work);
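
The pr_fmt() define that now accompanies KMSG_COMPONENT in the tape sources makes every subsequent pr_*() call prefix its message with the component name automatically; it has to appear before <linux/kernel.h> (which supplies the default pr_fmt) is pulled in. In short:

	#define KMSG_COMPONENT "tape_34xx"
	#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

	#include <linux/kernel.h>

	/* pr_info("resetting\n") now prints "tape_34xx: resetting" */
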
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index e4cc3aae9162..fc993acf99b6 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -9,8 +9,10 @@
  */
 
 #define KMSG_COMPONENT "tape_3590"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <asm/ebcdic.h>
@@ -136,7 +138,7 @@ static void int_to_ext_kekl(struct tape3592_kekl *in,
 	out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
 	memcpy(out->label, in->label, sizeof(in->label));
 	EBCASC(out->label, sizeof(in->label));
-	strstrip(out->label);
+	strim(out->label);
 }
 
 static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
@@ -608,7 +610,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
 
 	INIT_WORK(&p->work, tape_3590_work_handler);
 
-	p->device = tape_get_device_reference(device);
+	p->device = tape_get_device(device);
 	p->op = op;
 
 	schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 0c0705b91c28..097da8ce6be6 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -11,6 +11,7 @@
  */
 
 #define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/fs.h>
 #include <linux/module.h>
@@ -45,8 +46,6 @@
  */
 static int tapeblock_open(struct block_device *, fmode_t);
 static int tapeblock_release(struct gendisk *, fmode_t);
-static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
-			   unsigned long);
 static int tapeblock_medium_changed(struct gendisk *);
 static int tapeblock_revalidate_disk(struct gendisk *);
 
@@ -54,7 +53,6 @@ static const struct block_device_operations tapeblock_fops = {
 	.owner		 = THIS_MODULE,
 	.open		 = tapeblock_open,
 	.release	 = tapeblock_release,
-	.locked_ioctl	 = tapeblock_ioctl,
 	.media_changed   = tapeblock_medium_changed,
 	.revalidate_disk = tapeblock_revalidate_disk,
 };
@@ -224,9 +222,8 @@ tapeblock_setup_device(struct tape_device * device)
 		goto cleanup_queue;
 
 	blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
-	blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
-	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
-	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+	blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
+	blk_queue_max_segments(blkdat->request_queue, -1L);
 	blk_queue_max_segment_size(blkdat->request_queue, -1L);
 	blk_queue_segment_boundary(blkdat->request_queue, -1L);
 
@@ -239,7 +236,7 @@ tapeblock_setup_device(struct tape_device * device)
 	disk->major = tapeblock_major;
 	disk->first_minor = device->first_minor;
 	disk->fops = &tapeblock_fops;
-	disk->private_data = tape_get_device_reference(device);
+	disk->private_data = tape_get_device(device);
 	disk->queue = blkdat->request_queue;
 	set_capacity(disk, 0);
 	sprintf(disk->disk_name, "btibm%d",
@@ -247,11 +244,11 @@ tapeblock_setup_device(struct tape_device * device)
 
 	blkdat->disk = disk;
 	blkdat->medium_changed = 1;
-	blkdat->request_queue->queuedata = tape_get_device_reference(device);
+	blkdat->request_queue->queuedata = tape_get_device(device);
 
 	add_disk(disk);
 
-	tape_get_device_reference(device);
+	tape_get_device(device);
 	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
 
 	return 0;
@@ -274,13 +271,14 @@ tapeblock_cleanup_device(struct tape_device *device)
 	}
 
 	del_gendisk(device->blk_data.disk);
-	device->blk_data.disk->private_data =
-		tape_put_device(device->blk_data.disk->private_data);
+	device->blk_data.disk->private_data = NULL;
+	tape_put_device(device);
 	put_disk(device->blk_data.disk);
 
 	device->blk_data.disk = NULL;
 cleanup_queue:
-	device->blk_data.request_queue->queuedata = tape_put_device(device);
+	device->blk_data.request_queue->queuedata = NULL;
+	tape_put_device(device);
 
 	blk_cleanup_queue(device->blk_data.request_queue);
 	device->blk_data.request_queue = NULL;
@@ -363,7 +361,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
 	struct tape_device *	device;
 	int			rc;
 
-	device = tape_get_device_reference(disk->private_data);
+	device = tape_get_device(disk->private_data);
 
 	if (device->required_tapemarks) {
 		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
@@ -414,42 +412,6 @@ tapeblock_release(struct gendisk *disk, fmode_t mode)
 }
 
 /*
- * Support of some generic block device IOCTLs.
- */
-static int
-tapeblock_ioctl(
-	struct block_device *	bdev,
-	fmode_t			mode,
-	unsigned int		command,
-	unsigned long		arg
-) {
-	int rc;
-	int minor;
-	struct gendisk *disk = bdev->bd_disk;
-	struct tape_device *device;
-
-	rc = 0;
-	BUG_ON(!disk);
-	device = disk->private_data;
-	BUG_ON(!device);
-	minor = MINOR(bdev->bd_dev);
-
-	DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
-	DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
-
-	switch (command) {
-		/* Refuse some IOCTL calls without complaining (mount). */
-		case 0x5310:		/* CDROMMULTISESSION */
-			rc = -EINVAL;
-			break;
-		default:
-			rc = -EINVAL;
-	}
-
-	return rc;
-}
-
  * Initialize block device frontend.
  */
 int
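
Besides losing its ioctl method, tape_block.c is adapted to the renamed block-layer limit helpers: blk_queue_max_sectors() becomes blk_queue_max_hw_sectors(), and the separate physical/hardware segment limits collapse into a single blk_queue_max_segments() call. Schematically, with q standing for the request queue configured above:

	blk_queue_logical_block_size(q, TAPEBLOCK_HSEC_SIZE);
	blk_queue_max_hw_sectors(q, TAPEBLOCK_MAX_SEC); /* was blk_queue_max_sectors() */
	blk_queue_max_segments(q, -1L);	/* replaces the phys/hw segment pair */
	blk_queue_max_segment_size(q, -1L);
	blk_queue_segment_boundary(q, -1L);
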
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31566c55adfe..539045acaad4 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -10,11 +10,15 @@
  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/proc_fs.h>
 #include <linux/mtio.h>
 #include <linux/smp_lock.h>
+#include <linux/compat.h>
 
 #include <asm/uaccess.h>
 
@@ -33,18 +37,20 @@ static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
 static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
 static int tapechar_open(struct inode *,struct file *);
 static int tapechar_release(struct inode *,struct file *);
-static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
-			  unsigned long);
-static long tapechar_compat_ioctl(struct file *, unsigned int,
-			  unsigned long);
+static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
 
 static const struct file_operations tape_fops =
 {
 	.owner = THIS_MODULE,
 	.read = tapechar_read,
 	.write = tapechar_write,
-	.ioctl = tapechar_ioctl,
+	.unlocked_ioctl = tapechar_ioctl,
+#ifdef CONFIG_COMPAT
 	.compat_ioctl = tapechar_compat_ioctl,
+#endif
 	.open = tapechar_open,
 	.release = tapechar_release,
 };
@@ -170,7 +176,6 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
 	if (rc == 0) {
 		rc = block_size - request->rescnt;
 		DBF_EVENT(6, "TCHAR:rbytes:  %x\n", rc);
-		filp->f_pos += rc;
 		/* Copy data from idal buffer to user space. */
 		if (idal_buffer_to_user(device->char_data.idal_buf,
 					data, rc) != 0)
@@ -238,7 +243,6 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
 			break;
 		DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
 			  block_size - request->rescnt);
-		filp->f_pos += block_size - request->rescnt;
 		written += block_size - request->rescnt;
 		if (request->rescnt != 0)
 			break;
@@ -286,26 +290,20 @@ tapechar_open (struct inode *inode, struct file *filp)
 	if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
 		return -ENODEV;
 
-	lock_kernel();
 	minor = iminor(filp->f_path.dentry->d_inode);
-	device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
+	device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
 	if (IS_ERR(device)) {
-		DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
-		rc = PTR_ERR(device);
-		goto out;
+		DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
+		return PTR_ERR(device);
 	}
 
-
 	rc = tape_open(device);
 	if (rc == 0) {
 		filp->private_data = device;
-		rc = nonseekable_open(inode, filp);
-	}
-	else
+		nonseekable_open(inode, filp);
+	} else
 		tape_put_device(device);
 
-out:
-	unlock_kernel();
 	return rc;
 }
 
@@ -342,7 +340,8 @@ tapechar_release(struct inode *inode, struct file *filp)
 		device->char_data.idal_buf = NULL;
 	}
 	tape_release(device);
-	filp->private_data = tape_put_device(device);
+	filp->private_data = NULL;
+	tape_put_device(device);
 
 	return 0;
 }
@@ -351,16 +350,11 @@ tapechar_release(struct inode *inode, struct file *filp)
  * Tape device io controls.
  */
 static int
-tapechar_ioctl(struct inode *inp, struct file *filp,
+__tapechar_ioctl(struct tape_device *device,
 	 unsigned int no, unsigned long data)
 {
-	struct tape_device *device;
 	int rc;
 
-	DBF_EVENT(6, "TCHAR:ioct\n");
-
-	device = (struct tape_device *) filp->private_data;
-
 	if (no == MTIOCTOP) {
 		struct mtop op;
 
@@ -453,21 +447,44 @@ tapechar_ioctl(struct inode *inp, struct file *filp,
 }
 
 static long
+tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+	struct tape_device *device;
+	long rc;
+
+	DBF_EVENT(6, "TCHAR:ioct\n");
+
+	device = (struct tape_device *) filp->private_data;
+	mutex_lock(&device->mutex);
+	rc = __tapechar_ioctl(device, no, data);
+	mutex_unlock(&device->mutex);
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long
 tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
 {
 	struct tape_device *device = filp->private_data;
 	int rval = -ENOIOCTLCMD;
+	unsigned long argp;
 
+	/* The 'arg' argument of any ioctl function may only be used for
+	 * pointers because of the compat pointer conversion.
+	 * Consider this when adding new ioctls.
	 */
+	argp = (unsigned long) compat_ptr(data);
 	if (device->discipline->ioctl_fn) {
-		lock_kernel();
-		rval = device->discipline->ioctl_fn(device, no, data);
-		unlock_kernel();
+		mutex_lock(&device->mutex);
+		rval = device->discipline->ioctl_fn(device, no, argp);
+		mutex_unlock(&device->mutex);
 		if (rval == -EINVAL)
 			rval = -ENOIOCTLCMD;
 	}
 
 	return rval;
 }
+#endif /* CONFIG_COMPAT */
 
 /*
  * Initialize character device frontend.
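
tape_char.c shows the standard big-kernel-lock removal recipe for ioctl paths: the old handler body moves into a __-prefixed helper, and a thin .unlocked_ioctl wrapper takes a per-device mutex around it, giving the serialization the BKL used to provide but scoped to a single device. The same shape in miniature, for a hypothetical foodev:

	static long __foodev_ioctl(struct foodev *dev, unsigned int no,
				   unsigned long data)
	{
		/* former .ioctl body; runs with dev->mutex held */
		return -ENOIOCTLCMD;
	}

	static long foodev_ioctl(struct file *filp, unsigned int no,
				 unsigned long data)
	{
		struct foodev *dev = filp->private_data;
		long rc;

		mutex_lock(&dev->mutex);	/* replaces lock_kernel() */
		rc = __foodev_ioctl(dev, no, data);
		mutex_unlock(&dev->mutex);
		return rc;
	}
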
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index ddc914ccea8f..55343df61edd 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -7,6 +7,12 @@
  * Author: Stefan Bader <shbader@de.ibm.com>
  * Based on simple class device code by Greg K-H
  */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+
 #include "tape_class.h"
 
 MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 5cd31e071647..29c2d73d719d 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -12,12 +12,15 @@
  */
 
 #define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>	     // for kernel parameters
 #include <linux/kmod.h>	     // for requesting modules
 #include <linux/spinlock.h>  // for locks
 #include <linux/vmalloc.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 
 #include <asm/types.h>	     // for variable types
 
@@ -492,6 +495,7 @@ tape_alloc_device(void)
 		kfree(device);
 		return ERR_PTR(-ENOMEM);
 	}
+	mutex_init(&device->mutex);
 	INIT_LIST_HEAD(&device->req_queue);
 	INIT_LIST_HEAD(&device->node);
 	init_waitqueue_head(&device->state_change_wq);
@@ -511,11 +515,12 @@
  * increment the reference count.
  */
 struct tape_device *
-tape_get_device_reference(struct tape_device *device)
+tape_get_device(struct tape_device *device)
 {
-	DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
-		atomic_inc_return(&device->ref_count));
+	int count;
 
+	count = atomic_inc_return(&device->ref_count);
+	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
 	return device;
 }
 
@@ -525,32 +530,25 @@ tape_get_device_reference(struct tape_device *device)
  * The function returns a NULL pointer to be used by the caller
  * for clearing reference pointers.
  */
-struct tape_device *
+void
 tape_put_device(struct tape_device *device)
 {
-	int remain;
+	int count;
 
-	remain = atomic_dec_return(&device->ref_count);
-	if (remain > 0) {
-		DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
-	} else {
-		if (remain < 0) {
-			DBF_EVENT(4, "put device without reference\n");
-		} else {
-			DBF_EVENT(4, "tape_free_device(%p)\n", device);
-			kfree(device->modeset_byte);
-			kfree(device);
-		}
+	count = atomic_dec_return(&device->ref_count);
+	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
+	BUG_ON(count < 0);
+	if (count == 0) {
+		kfree(device->modeset_byte);
+		kfree(device);
 	}
-
-	return NULL;
 }
 
 /*
  * Find tape device by a device index.
  */
 struct tape_device *
-tape_get_device(int devindex)
+tape_find_device(int devindex)
 {
 	struct tape_device *device, *tmp;
 
@@ -558,7 +556,7 @@ tape_get_device(int devindex)
 	read_lock(&tape_device_lock);
 	list_for_each_entry(tmp, &tape_device_list, node) {
 		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
-			device = tape_get_device_reference(tmp);
+			device = tape_get_device(tmp);
 			break;
 		}
 	}
@@ -579,7 +577,8 @@ tape_generic_probe(struct ccw_device *cdev)
 	device = tape_alloc_device();
 	if (IS_ERR(device))
 		return -ENODEV;
-	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
+				     CCWDEV_DO_MULTIPATH);
 	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
 	if (ret) {
 		tape_put_device(device);
@@ -606,7 +605,8 @@ __tape_discard_requests(struct tape_device *device)
 		list_del(&request->list);
 
 		/* Decrease ref_count for removed request. */
-		request->device = tape_put_device(device);
+		request->device = NULL;
+		tape_put_device(device);
 		request->rc = -EIO;
 		if (request->callback != NULL)
 			request->callback(request, request->callback_data);
@@ -664,9 +664,11 @@ tape_generic_remove(struct ccw_device *cdev)
 		tape_cleanup_device(device);
 	}
 
-	if (!dev_get_drvdata(&cdev->dev)) {
+	device = dev_get_drvdata(&cdev->dev);
+	if (device) {
 		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
-		dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev)));
+		dev_set_drvdata(&cdev->dev, NULL);
+		tape_put_device(device);
 	}
 }
 
@@ -721,9 +723,8 @@ tape_free_request (struct tape_request * request)
 {
 	DBF_LH(6, "Free request %p\n", request);
 
-	if (request->device != NULL) {
-		request->device = tape_put_device(request->device);
-	}
+	if (request->device)
+		tape_put_device(request->device);
 	kfree(request->cpdata);
 	kfree(request->cpaddr);
 	kfree(request);
@@ -838,7 +839,8 @@ static void tape_long_busy_timeout(unsigned long data)
 	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
 	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
 	__tape_start_next_request(device);
-	device->lb_timeout.data = (unsigned long) tape_put_device(device);
+	device->lb_timeout.data = 0UL;
+	tape_put_device(device);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 }
 
@@ -918,7 +920,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
 	}
 
 	/* Increase use count of device for the added request. */
-	request->device = tape_get_device_reference(device);
+	request->device = tape_get_device(device);
 
 	if (list_empty(&device->req_queue)) {
 		/* No other requests are on the queue. Start this one. */
@@ -1117,8 +1119,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 	if (req->status == TAPE_REQUEST_LONG_BUSY) {
 		DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
 		if (del_timer(&device->lb_timeout)) {
-			device->lb_timeout.data = (unsigned long)
-				tape_put_device(device);
+			device->lb_timeout.data = 0UL;
+			tape_put_device(device);
 			__tape_start_next_request(device);
 		}
 		return;
@@ -1173,7 +1175,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 		break;
 	case TAPE_IO_LONG_BUSY:
 		device->lb_timeout.data =
-			(unsigned long)tape_get_device_reference(device);
+			(unsigned long) tape_get_device(device);
 		device->lb_timeout.expires = jiffies +
 			LONG_BUSY_TIMEOUT * HZ;
 		DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
@@ -1326,7 +1328,7 @@ EXPORT_SYMBOL(tape_generic_online);
 EXPORT_SYMBOL(tape_generic_offline);
 EXPORT_SYMBOL(tape_generic_pm_suspend);
 EXPORT_SYMBOL(tape_put_device);
-EXPORT_SYMBOL(tape_get_device_reference);
+EXPORT_SYMBOL(tape_get_device);
 EXPORT_SYMBOL(tape_state_verbose);
 EXPORT_SYMBOL(tape_op_verbose);
 EXPORT_SYMBOL(tape_state_set);
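
The reference-counting rework above changes the calling convention for the whole tape subsystem: tape_get_device() is now the plain take-a-reference operation (the old lookup by index lives on as tape_find_device()), and tape_put_device() returns void, so callers clear their own pointers instead of assigning the old NULL return value. In outline:

	/* taking a reference */
	request->device = tape_get_device(device);

	/* dropping it: clear the pointer, then put the reference */
	request->device = NULL;
	tape_put_device(device);

	/* no longer possible now that tape_put_device() returns void:
	 *	request->device = tape_put_device(request->device);
	 */
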
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 202f42132939..0ceb37984f77 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -11,6 +11,9 @@
  * PROCFS Functions
  */
 
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/seq_file.h>
@@ -45,7 +48,7 @@ static int tape_proc_show(struct seq_file *m, void *v)
 		seq_printf(m, "TapeNo\tBusID      CuType/Model\t"
 			"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
 	}
-	device = tape_get_device(n);
+	device = tape_find_device(n);
 	if (IS_ERR(device))
 		return 0;
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 750354ad16e5..03f07e5dd6e9 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -11,6 +11,9 @@
  *		 Stefan Bader <shbader@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/bio.h>
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 38385677c653..911822db614d 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -19,6 +19,7 @@
 
 #include <linux/slab.h>
 #include <linux/bootmem.h>
+#include <linux/compat.h>
 
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
@@ -1731,6 +1732,22 @@ tty3270_ioctl(struct tty_struct *tty, struct file *file,
 	return kbd_ioctl(tp->kbd, file, cmd, arg);
 }
 
+#ifdef CONFIG_COMPAT
+static long
+tty3270_compat_ioctl(struct tty_struct *tty, struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return -ENODEV;
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+	return kbd_ioctl(tp->kbd, file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 static const struct tty_operations tty3270_ops = {
 	.open = tty3270_open,
 	.close = tty3270_close,
@@ -1745,6 +1762,9 @@ static const struct tty_operations tty3270_ops = {
 	.hangup = tty3270_hangup,
 	.wait_until_sent =  tty3270_wait_until_sent,
 	.ioctl = tty3270_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tty3270_compat_ioctl,
+#endif
 	.set_termios = tty3270_set_termios
 };
 
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index a6087cec55b4..5bb59d36a6d4 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -19,6 +19,8 @@
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/compat.h>
 #include <asm/cpcmd.h>
 #include <asm/debug.h>
 #include <asm/uaccess.h>
@@ -139,21 +141,26 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
 static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct vmcp_session *session;
+	int __user *argp;
 	int temp;
 
 	session = (struct vmcp_session *)file->private_data;
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (int __user *)arg;
 	if (mutex_lock_interruptible(&session->mutex))
 		return -ERESTARTSYS;
 	switch (cmd) {
 	case VMCP_GETCODE:
 		temp = session->resp_code;
 		mutex_unlock(&session->mutex);
-		return put_user(temp, (int __user *)arg);
+		return put_user(temp, argp);
 	case VMCP_SETBUF:
 		free_pages((unsigned long)session->response,
 			   get_order(session->bufsize));
 		session->response=NULL;
-		temp = get_user(session->bufsize, (int __user *)arg);
+		temp = get_user(session->bufsize, argp);
 		if (get_order(session->bufsize) > 8) {
 			session->bufsize = PAGE_SIZE;
 			temp = -EINVAL;
@@ -163,7 +170,7 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case VMCP_GETSIZE:
 		temp = session->resp_size;
 		mutex_unlock(&session->mutex);
-		return put_user(temp, (int __user *)arg);
+		return put_user(temp, argp);
 	default:
 		mutex_unlock(&session->mutex);
 		return -ENOIOCTLCMD;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d1a142fa3eb4..e40a1b892866 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -16,6 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
@@ -312,11 +313,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
 		return -ENOSYS;
 
 	/* Besure this device hasn't already been opened */
-	lock_kernel();
 	spin_lock_bh(&logptr->priv_lock);
 	if (logptr->dev_in_use) {
 		spin_unlock_bh(&logptr->priv_lock);
-		unlock_kernel();
 		return -EBUSY;
 	}
 	logptr->dev_in_use = 1;
@@ -360,9 +359,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
 			   || (logptr->iucv_path_severed));
 	if (logptr->iucv_path_severed)
 		goto out_record;
-	ret = nonseekable_open(inode, filp);
-	unlock_kernel();
-	return ret;
+	nonseekable_open(inode, filp);
+	return 0;
 
 out_record:
 	if (logptr->autorecording)
@@ -372,7 +370,6 @@ out_path:
 	logptr->path = NULL;
 out_dev:
 	logptr->dev_in_use = 0;
-	unlock_kernel();
 	return -EIO;
 }
 
@@ -679,7 +676,7 @@ static int vmlogrdr_pm_prepare(struct device *dev)
 }
 
 
-static struct dev_pm_ops vmlogrdr_pm_ops = {
+static const struct dev_pm_ops vmlogrdr_pm_ops = {
 	.prepare = vmlogrdr_pm_prepare,
 };
 
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 77571b68539a..1de672f21037 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -12,6 +12,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/cdev.h>
+#include <linux/slab.h>
 #include <linux/smp_lock.h>
 
 #include <asm/uaccess.h>
@@ -695,7 +696,6 @@ static int ur_open(struct inode *inode, struct file *file)
 
 	if (accmode == O_RDWR)
 		return -EACCES;
-	lock_kernel();
 	/*
 	 * We treat the minor number as the devno of the ur device
 	 * to find in the driver tree.
@@ -749,7 +749,6 @@ static int ur_open(struct inode *inode, struct file *file)
 		goto fail_urfile_free;
 	urf->file_reclen = rc;
 	file->private_data = urf;
-	unlock_kernel();
 	return 0;
 
 fail_urfile_free:
@@ -761,7 +760,6 @@ fail_unlock:
 fail_put:
 	urdev_put(urd);
 out:
-	unlock_kernel();
 	return rc;
 }
 
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index f2bc287b69e4..e13508c98b1a 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -17,9 +17,9 @@
17#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/slab.h>
20#include <linux/suspend.h> 21#include <linux/suspend.h>
21#include <linux/watchdog.h> 22#include <linux/watchdog.h>
22#include <linux/smp_lock.h>
23 23
24#include <asm/ebcdic.h> 24#include <asm/ebcdic.h>
25#include <asm/io.h> 25#include <asm/io.h>
@@ -49,6 +49,8 @@ static unsigned int vmwdt_interval = 60;
49static unsigned long vmwdt_is_open; 49static unsigned long vmwdt_is_open;
50static int vmwdt_expect_close; 50static int vmwdt_expect_close;
51 51
52static DEFINE_MUTEX(vmwdt_mutex);
53
52#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */ 54#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
53#define VMWDT_RUNNING 1 /* The watchdog is armed */ 55#define VMWDT_RUNNING 1 /* The watchdog is armed */
54 56
@@ -133,15 +135,11 @@ static int __init vmwdt_probe(void)
133static int vmwdt_open(struct inode *i, struct file *f) 135static int vmwdt_open(struct inode *i, struct file *f)
134{ 136{
135 int ret; 137 int ret;
136 lock_kernel(); 138 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
137 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
138 unlock_kernel();
139 return -EBUSY; 139 return -EBUSY;
140 }
141 ret = vmwdt_keepalive(); 140 ret = vmwdt_keepalive();
142 if (ret) 141 if (ret)
143 clear_bit(VMWDT_OPEN, &vmwdt_is_open); 142 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
144 unlock_kernel();
145 return ret ? ret : nonseekable_open(i, f); 143 return ret ? ret : nonseekable_open(i, f);
146} 144}
147 145
@@ -160,8 +158,7 @@ static struct watchdog_info vmwdt_info = {
160 .identity = "z/VM Watchdog Timer", 158 .identity = "z/VM Watchdog Timer",
161}; 159};
162 160
163static int vmwdt_ioctl(struct inode *i, struct file *f, 161static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
164 unsigned int cmd, unsigned long arg)
165{ 162{
166 switch (cmd) { 163 switch (cmd) {
167 case WDIOC_GETSUPPORT: 164 case WDIOC_GETSUPPORT:
@@ -205,10 +202,19 @@ static int vmwdt_ioctl(struct inode *i, struct file *f,
205 case WDIOC_KEEPALIVE: 202 case WDIOC_KEEPALIVE:
206 return vmwdt_keepalive(); 203 return vmwdt_keepalive();
207 } 204 }
208
209 return -EINVAL; 205 return -EINVAL;
210} 206}
211 207
208static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
209{
210 int rc;
211
212 mutex_lock(&vmwdt_mutex);
213 rc = __vmwdt_ioctl(cmd, arg);
214 mutex_unlock(&vmwdt_mutex);
215 return (long) rc;
216}
217
212static ssize_t vmwdt_write(struct file *f, const char __user *buf, 218static ssize_t vmwdt_write(struct file *f, const char __user *buf,
213 size_t count, loff_t *ppos) 219 size_t count, loff_t *ppos)
214{ 220{
@@ -288,7 +294,7 @@ static struct notifier_block vmwdt_power_notifier = {
288static const struct file_operations vmwdt_fops = { 294static const struct file_operations vmwdt_fops = {
289 .open = &vmwdt_open, 295 .open = &vmwdt_open,
290 .release = &vmwdt_close, 296 .release = &vmwdt_close,
291 .ioctl = &vmwdt_ioctl, 297 .unlocked_ioctl = &vmwdt_ioctl,
292 .write = &vmwdt_write, 298 .write = &vmwdt_write,
293 .owner = THIS_MODULE, 299 .owner = THIS_MODULE,
294}; 300};
@@ -309,6 +315,10 @@ static int __init vmwdt_init(void)
309 ret = register_pm_notifier(&vmwdt_power_notifier); 315 ret = register_pm_notifier(&vmwdt_power_notifier);
310 if (ret) 316 if (ret)
311 return ret; 317 return ret;
318 /*
319 * misc_register() has to be the last action in module_init(), because
320 * file operations will be available right after this.
321 */
312 ret = misc_register(&vmwdt_dev); 322 ret = misc_register(&vmwdt_dev);
313 if (ret) { 323 if (ret) {
314 unregister_pm_notifier(&vmwdt_power_notifier); 324 unregister_pm_notifier(&vmwdt_power_notifier);
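The vmwatchdog changes above are the standard recipe for removing the big kernel lock from a character device: serialize with a driver-private mutex instead of lock_kernel(), split the old .ioctl handler into a lock-free __vmwdt_ioctl() plus a locking wrapper registered as .unlocked_ioctl, and keep misc_register() as the last step of module_init() so no file operation can run before setup completes. A hedged, generic sketch of the same conversion (names are illustrative, not from this patch):

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_mutex);	/* takes over the serialization the BKL provided */

static int __foo_ioctl(unsigned int cmd, unsigned long arg)
{
	/* former .ioctl body; runs with foo_mutex held */
	return -EINVAL;
}

static long foo_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int rc;

	mutex_lock(&foo_mutex);
	rc = __foo_ioctl(cmd, arg);
	mutex_unlock(&foo_mutex);
	return (long) rc;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,	/* replaces the BKL-protected .ioctl */
};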
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 82daa3c1dc9c..7217966f7d31 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -13,8 +13,10 @@
13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 14
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h>
16#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
17#include <linux/debugfs.h> 18#include <linux/debugfs.h>
19#include <asm/asm-offsets.h>
18#include <asm/ipl.h> 20#include <asm/ipl.h>
19#include <asm/sclp.h> 21#include <asm/sclp.h>
20#include <asm/setup.h> 22#include <asm/setup.h>
@@ -40,12 +42,12 @@ enum arch_id {
40/* dump system info */ 42/* dump system info */
41 43
42struct sys_info { 44struct sys_info {
43 enum arch_id arch; 45 enum arch_id arch;
44 unsigned long sa_base; 46 unsigned long sa_base;
45 u32 sa_size; 47 u32 sa_size;
46 int cpu_map[NR_CPUS]; 48 int cpu_map[NR_CPUS];
47 unsigned long mem_size; 49 unsigned long mem_size;
48 union save_area lc_mask; 50 struct save_area lc_mask;
49}; 51};
50 52
51struct ipib_info { 53struct ipib_info {
@@ -140,33 +142,6 @@ static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
140 return memcpy_hsa(dest, src, count, TO_KERNEL); 142 return memcpy_hsa(dest, src, count, TO_KERNEL);
141} 143}
142 144
143static int memcpy_real(void *dest, unsigned long src, size_t count)
144{
145 unsigned long flags;
146 int rc = -EFAULT;
147 register unsigned long _dest asm("2") = (unsigned long) dest;
148 register unsigned long _len1 asm("3") = (unsigned long) count;
149 register unsigned long _src asm("4") = src;
150 register unsigned long _len2 asm("5") = (unsigned long) count;
151
152 if (count == 0)
153 return 0;
154 flags = __raw_local_irq_stnsm(0xf8UL); /* switch to real mode */
155 asm volatile (
156 "0: mvcle %1,%2,0x0\n"
157 "1: jo 0b\n"
158 " lhi %0,0x0\n"
159 "2:\n"
160 EX_TABLE(1b,2b)
161 : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
162 "+d" (_len2), "=m" (*((long*)dest))
163 : "m" (*((long*)src))
164 : "cc", "memory");
165 __raw_local_irq_ssm(flags);
166
167 return rc;
168}
169
170static int memcpy_real_user(void __user *dest, unsigned long src, size_t count) 145static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
171{ 146{
172 static char buf[4096]; 147 static char buf[4096];
@@ -174,7 +149,7 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
174 149
175 while (offs < count) { 150 while (offs < count) {
176 size = min(sizeof(buf), count - offs); 151 size = min(sizeof(buf), count - offs);
177 if (memcpy_real(buf, src + offs, size)) 152 if (memcpy_real(buf, (void *) src + offs, size))
178 return -EFAULT; 153 return -EFAULT;
179 if (copy_to_user(dest + offs, buf, size)) 154 if (copy_to_user(dest + offs, buf, size))
180 return -EFAULT; 155 return -EFAULT;
@@ -183,52 +158,9 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
183 return 0; 158 return 0;
184} 159}
185 160
186#ifdef __s390x__
187/*
188 * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
189 */
190static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
191 int cpu)
192{
193 int i;
194
195 for (i = 0; i < 16; i++) {
196 out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
197 out->s390.acc_regs[i] = in->s390x.acc_regs[i];
198 out->s390.ctrl_regs[i] =
199 in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
200 }
201 /* locore for 31 bit has only space for fpregs 0,2,4,6 */
202 out->s390.fp_regs[0] = in->s390x.fp_regs[0];
203 out->s390.fp_regs[1] = in->s390x.fp_regs[2];
204 out->s390.fp_regs[2] = in->s390x.fp_regs[4];
205 out->s390.fp_regs[3] = in->s390x.fp_regs[6];
206 memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
207 out->s390.psw[1] |= 0x8; /* set bit 12 */
208 memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
209 out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
210 out->s390.pref_reg = in->s390x.pref_reg;
211 out->s390.timer = in->s390x.timer;
212 out->s390.clk_cmp = in->s390x.clk_cmp;
213}
214
215static void __init s390x_to_s390_save_areas(void)
216{
217 int i = 1;
218 static union save_area tmp;
219
220 while (zfcpdump_save_areas[i]) {
221 s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
222 memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
223 i++;
224 }
225}
226
227#endif /* __s390x__ */
228
229static int __init init_cpu_info(enum arch_id arch) 161static int __init init_cpu_info(enum arch_id arch)
230{ 162{
231 union save_area *sa; 163 struct save_area *sa;
232 164
233 /* get info for boot cpu from lowcore, stored in the HSA */ 165 /* get info for boot cpu from lowcore, stored in the HSA */
234 166
@@ -241,20 +173,12 @@ static int __init init_cpu_info(enum arch_id arch)
241 return -EIO; 173 return -EIO;
242 } 174 }
243 zfcpdump_save_areas[0] = sa; 175 zfcpdump_save_areas[0] = sa;
244
245#ifdef __s390x__
246 /* convert s390x regs to s390, if we are dumping an s390 Linux */
247
248 if (arch == ARCH_S390)
249 s390x_to_s390_save_areas();
250#endif
251
252 return 0; 176 return 0;
253} 177}
254 178
255static DEFINE_MUTEX(zcore_mutex); 179static DEFINE_MUTEX(zcore_mutex);
256 180
257#define DUMP_VERSION 0x3 181#define DUMP_VERSION 0x5
258#define DUMP_MAGIC 0xa8190173618f23fdULL 182#define DUMP_MAGIC 0xa8190173618f23fdULL
259#define DUMP_ARCH_S390X 2 183#define DUMP_ARCH_S390X 2
260#define DUMP_ARCH_S390 1 184#define DUMP_ARCH_S390 1
@@ -279,7 +203,14 @@ struct zcore_header {
279 u32 volnr; 203 u32 volnr;
280 u32 build_arch; 204 u32 build_arch;
281 u64 rmem_size; 205 u64 rmem_size;
282 char pad2[4016]; 206 u8 mvdump;
207 u16 cpu_cnt;
208 u16 real_cpu_cnt;
209 u8 end_pad1[0x200-0x061];
210 u64 mvdump_sign;
211 u64 mvdump_zipl_time;
212 u8 end_pad2[0x800-0x210];
213 u32 lc_vec[512];
283} __attribute__((packed,__aligned__(16))); 214} __attribute__((packed,__aligned__(16)));
284 215
285static struct zcore_header zcore_header = { 216static struct zcore_header zcore_header = {
@@ -289,7 +220,7 @@ static struct zcore_header zcore_header = {
289 .dump_level = 0, 220 .dump_level = 0,
290 .page_size = PAGE_SIZE, 221 .page_size = PAGE_SIZE,
291 .mem_start = 0, 222 .mem_start = 0,
292#ifdef __s390x__ 223#ifdef CONFIG_64BIT
293 .build_arch = DUMP_ARCH_S390X, 224 .build_arch = DUMP_ARCH_S390X,
294#else 225#else
295 .build_arch = DUMP_ARCH_S390, 226 .build_arch = DUMP_ARCH_S390,
@@ -340,11 +271,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
340 unsigned long prefix; 271 unsigned long prefix;
341 unsigned long sa_off, len, buf_off; 272 unsigned long sa_off, len, buf_off;
342 273
343 if (sys_info.arch == ARCH_S390) 274 prefix = zfcpdump_save_areas[i]->pref_reg;
344 prefix = zfcpdump_save_areas[i]->s390.pref_reg;
345 else
346 prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
347
348 sa_start = prefix + sys_info.sa_base; 275 sa_start = prefix + sys_info.sa_base;
349 sa_end = prefix + sys_info.sa_base + sys_info.sa_size; 276 sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
350 277
@@ -561,34 +488,39 @@ static const struct file_operations zcore_reipl_fops = {
561 .release = zcore_reipl_release, 488 .release = zcore_reipl_release,
562}; 489};
563 490
491#ifdef CONFIG_32BIT
564 492
565static void __init set_s390_lc_mask(union save_area *map) 493static void __init set_lc_mask(struct save_area *map)
566{ 494{
567 memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); 495 memset(&map->ext_save, 0xff, sizeof(map->ext_save));
568 memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); 496 memset(&map->timer, 0xff, sizeof(map->timer));
569 memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); 497 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
570 memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); 498 memset(&map->psw, 0xff, sizeof(map->psw));
571 memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); 499 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
572 memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); 500 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
573 memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); 501 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
574 memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); 502 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
575 memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); 503 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
576} 504}
577 505
578static void __init set_s390x_lc_mask(union save_area *map) 506#else /* CONFIG_32BIT */
507
508static void __init set_lc_mask(struct save_area *map)
579{ 509{
580 memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); 510 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
581 memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); 511 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
582 memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); 512 memset(&map->psw, 0xff, sizeof(map->psw));
583 memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); 513 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
584 memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); 514 memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
585 memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); 515 memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
586 memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); 516 memset(&map->timer, 0xff, sizeof(map->timer));
587 memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); 517 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
588 memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); 518 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
589 memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); 519 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
590} 520}
591 521
522#endif /* CONFIG_32BIT */
523
592/* 524/*
593 * Initialize dump globals for a given architecture 525 * Initialize dump globals for a given architecture
594 */ 526 */
@@ -599,21 +531,18 @@ static int __init sys_info_init(enum arch_id arch)
599 switch (arch) { 531 switch (arch) {
600 case ARCH_S390X: 532 case ARCH_S390X:
601 pr_alert("DETECTED 'S390X (64 bit) OS'\n"); 533 pr_alert("DETECTED 'S390X (64 bit) OS'\n");
602 sys_info.sa_base = SAVE_AREA_BASE_S390X;
603 sys_info.sa_size = sizeof(struct save_area_s390x);
604 set_s390x_lc_mask(&sys_info.lc_mask);
605 break; 534 break;
606 case ARCH_S390: 535 case ARCH_S390:
607 pr_alert("DETECTED 'S390 (32 bit) OS'\n"); 536 pr_alert("DETECTED 'S390 (32 bit) OS'\n");
608 sys_info.sa_base = SAVE_AREA_BASE_S390;
609 sys_info.sa_size = sizeof(struct save_area_s390);
610 set_s390_lc_mask(&sys_info.lc_mask);
611 break; 537 break;
612 default: 538 default:
613 pr_alert("0x%x is an unknown architecture.\n",arch); 539 pr_alert("0x%x is an unknown architecture.\n",arch);
614 return -EINVAL; 540 return -EINVAL;
615 } 541 }
542 sys_info.sa_base = SAVE_AREA_BASE;
543 sys_info.sa_size = sizeof(struct save_area);
616 sys_info.arch = arch; 544 sys_info.arch = arch;
545 set_lc_mask(&sys_info.lc_mask);
617 rc = init_cpu_info(arch); 546 rc = init_cpu_info(arch);
618 if (rc) 547 if (rc)
619 return rc; 548 return rc;
@@ -660,8 +589,9 @@ static int __init get_mem_size(unsigned long *mem)
660 589
661static int __init zcore_header_init(int arch, struct zcore_header *hdr) 590static int __init zcore_header_init(int arch, struct zcore_header *hdr)
662{ 591{
663 int rc; 592 int rc, i;
664 unsigned long memory = 0; 593 unsigned long memory = 0;
594 u32 prefix;
665 595
666 if (arch == ARCH_S390X) 596 if (arch == ARCH_S390X)
667 hdr->arch_id = DUMP_ARCH_S390X; 597 hdr->arch_id = DUMP_ARCH_S390X;
@@ -676,6 +606,14 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
676 hdr->num_pages = memory / PAGE_SIZE; 606 hdr->num_pages = memory / PAGE_SIZE;
677 hdr->tod = get_clock(); 607 hdr->tod = get_clock();
678 get_cpu_id(&hdr->cpu_id); 608 get_cpu_id(&hdr->cpu_id);
609 for (i = 0; zfcpdump_save_areas[i]; i++) {
610 prefix = zfcpdump_save_areas[i]->pref_reg;
611 hdr->real_cpu_cnt++;
612 if (!prefix)
613 continue;
614 hdr->lc_vec[hdr->cpu_cnt] = prefix;
615 hdr->cpu_cnt++;
616 }
679 return 0; 617 return 0;
680} 618}
681 619
@@ -699,12 +637,8 @@ static int __init zcore_reipl_init(void)
699 if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) 637 if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
700 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); 638 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
701 else 639 else
702 rc = memcpy_real(ipl_block, ipib_info.ipib, PAGE_SIZE); 640 rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
703 if (rc) { 641 if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
704 free_page((unsigned long) ipl_block);
705 return rc;
706 }
707 if (csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
708 ipib_info.checksum) { 642 ipib_info.checksum) {
709 TRACE("Checksum does not match\n"); 643 TRACE("Checksum does not match\n");
710 free_page((unsigned long) ipl_block); 644 free_page((unsigned long) ipl_block);
@@ -741,14 +675,21 @@ static int __init zcore_init(void)
741 if (rc) 675 if (rc)
742 goto fail; 676 goto fail;
743 677
744#ifndef __s390x__ 678#ifdef CONFIG_64BIT
679 if (arch == ARCH_S390) {
680 pr_alert("The 64-bit dump tool cannot be used for a "
681 "32-bit system\n");
682 rc = -EINVAL;
683 goto fail;
684 }
685#else /* CONFIG_64BIT */
745 if (arch == ARCH_S390X) { 686 if (arch == ARCH_S390X) {
746 pr_alert("The 32-bit dump tool cannot be used for a " 687 pr_alert("The 32-bit dump tool cannot be used for a "
747 "64-bit system\n"); 688 "64-bit system\n");
748 rc = -EINVAL; 689 rc = -EINVAL;
749 goto fail; 690 goto fail;
750 } 691 }
751#endif 692#endif /* CONFIG_64BIT */
752 693
753 rc = sys_info_init(arch); 694 rc = sys_info_init(arch);
754 if (rc) 695 if (rc)
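The zcore rework above folds the separate s390/s390x register layouts into a single struct save_area and locates each CPU's save area through its prefix register: the area of CPU i starts at pref_reg + sa_base and spans sa_size bytes, which is what zcore_add_lc() clips against the requested dump window. The clipping arithmetic in isolation, as a hedged stand-alone sketch (plain C, illustrative only):

#include <stddef.h>

/* Intersect one CPU's save area [sa_start, sa_end) with a requested
 * dump window [start, start + count); mirrors the bounds computed in
 * zcore_add_lc(). Returns 1 and the overlap description, or 0 if the
 * ranges are disjoint.
 */
static int save_area_overlap(unsigned long prefix, unsigned long sa_base,
			     unsigned long sa_size, unsigned long start,
			     size_t count, unsigned long *sa_off,
			     unsigned long *buf_off, unsigned long *len)
{
	unsigned long sa_start = prefix + sa_base;
	unsigned long sa_end = sa_start + sa_size;
	unsigned long end = start + count;

	if (end <= sa_start || start >= sa_end)
		return 0;	/* window misses this save area entirely */
	*sa_off = start > sa_start ? start - sa_start : 0;
	*buf_off = start > sa_start ? 0 : sa_start - start;
	*len = (end < sa_end ? end : sa_end) - (sa_start + *sa_off);
	return 1;
}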
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index fa4c9662f65e..e1b700a19648 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,12 +3,12 @@
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
6 fcx.o itcw.o crw.o 6 fcx.o itcw.o crw.o ccwreq.o
7ccw_device-objs += device.o device_fsm.o device_ops.o 7ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
9obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o 10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
11obj-$(CONFIG_CCWGROUP) += ccwgroup.o 11obj-$(CONFIG_CCWGROUP) += ccwgroup.o
12 12
13qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o 13qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
14obj-$(CONFIG_QDIO) += qdio.o 14obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 7eab9ab9f406..13cb60162e42 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -14,7 +14,6 @@
14 14
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
17#include <linux/slab.h>
18#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
19#include <linux/seq_file.h> 18#include <linux/seq_file.h>
20#include <linux/ctype.h> 19#include <linux/ctype.h>
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index a5a62f1f7747..5f97ea2ee6b1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -560,7 +560,7 @@ static int ccwgroup_pm_restore(struct device *dev)
560 return gdrv->restore ? gdrv->restore(gdev) : 0; 560 return gdrv->restore ? gdrv->restore(gdev) : 0;
561} 561}
562 562
563static struct dev_pm_ops ccwgroup_pm_ops = { 563static const struct dev_pm_ops ccwgroup_pm_ops = {
564 .prepare = ccwgroup_pm_prepare, 564 .prepare = ccwgroup_pm_prepare,
565 .complete = ccwgroup_pm_complete, 565 .complete = ccwgroup_pm_complete,
566 .freeze = ccwgroup_pm_freeze, 566 .freeze = ccwgroup_pm_freeze,
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 000000000000..37df42af05ec
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,325 @@
1/*
2 * Handling of internal CCW device requests.
3 *
4 * Copyright IBM Corp. 2009
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/err.h>
10#include <asm/ccwdev.h>
11#include <asm/cio.h>
12
13#include "io_sch.h"
14#include "cio.h"
15#include "device.h"
16#include "cio_debug.h"
17
18/**
19 * lpm_adjust - adjust path mask
20 * @lpm: path mask to adjust
21 * @mask: mask of available paths
22 *
23 * Shift @lpm right until @lpm and @mask have at least one bit in common or
24 * until @lpm is zero. Return the resulting lpm.
25 */
26int lpm_adjust(int lpm, int mask)
27{
28 while (lpm && ((lpm & mask) == 0))
29 lpm >>= 1;
30 return lpm;
31}
32
33/*
34 * Adjust path mask to use next path and reset retry count. Return resulting
35 * path mask.
36 */
37static u16 ccwreq_next_path(struct ccw_device *cdev)
38{
39 struct ccw_request *req = &cdev->private->req;
40
41 req->retries = req->maxretries;
42	req->mask = lpm_adjust(req->mask >> 1, req->lpm);
43
44 return req->mask;
45}
46
47/*
48 * Clean up device state and report to callback.
49 */
50static void ccwreq_stop(struct ccw_device *cdev, int rc)
51{
52 struct ccw_request *req = &cdev->private->req;
53
54 if (req->done)
55 return;
56 req->done = 1;
57 ccw_device_set_timeout(cdev, 0);
58 memset(&cdev->private->irb, 0, sizeof(struct irb));
59 if (rc && rc != -ENODEV && req->drc)
60 rc = req->drc;
61 req->callback(cdev, req->data, rc);
62}
63
64/*
65 * (Re-)Start the operation until retries and paths are exhausted.
66 */
67static void ccwreq_do(struct ccw_device *cdev)
68{
69 struct ccw_request *req = &cdev->private->req;
70 struct subchannel *sch = to_subchannel(cdev->dev.parent);
71 struct ccw1 *cp = req->cp;
72 int rc = -EACCES;
73
74 while (req->mask) {
75 if (req->retries-- == 0) {
76 /* Retries exhausted, try next path. */
77 ccwreq_next_path(cdev);
78 continue;
79 }
80 /* Perform start function. */
81 memset(&cdev->private->irb, 0, sizeof(struct irb));
82 rc = cio_start(sch, cp, (u8) req->mask);
83 if (rc == 0) {
84 /* I/O started successfully. */
85 ccw_device_set_timeout(cdev, req->timeout);
86 return;
87 }
88 if (rc == -ENODEV) {
89 /* Permanent device error. */
90 break;
91 }
92 if (rc == -EACCES) {
93	/* Permanent path error. */
94 ccwreq_next_path(cdev);
95 continue;
96 }
97 /* Temporary improper status. */
98 rc = cio_clear(sch);
99 if (rc)
100 break;
101 return;
102 }
103 ccwreq_stop(cdev, rc);
104}
105
106/**
107 * ccw_request_start - perform I/O request
108 * @cdev: ccw device
109 *
110 * Perform the I/O request specified by cdev->req.
111 */
112void ccw_request_start(struct ccw_device *cdev)
113{
114 struct ccw_request *req = &cdev->private->req;
115
116 /* Try all paths twice to counter link flapping. */
117 req->mask = 0x8080;
118 req->retries = req->maxretries;
119 req->mask = lpm_adjust(req->mask, req->lpm);
120 req->drc = 0;
121 req->done = 0;
122 req->cancel = 0;
123 if (!req->mask)
124 goto out_nopath;
125 ccwreq_do(cdev);
126 return;
127
128out_nopath:
129 ccwreq_stop(cdev, -EACCES);
130}
131
132/**
133 * ccw_request_cancel - cancel running I/O request
134 * @cdev: ccw device
135 *
136 * Cancel the I/O request specified by cdev->req. Return non-zero if request
137 * has already finished, zero otherwise.
138 */
139int ccw_request_cancel(struct ccw_device *cdev)
140{
141 struct subchannel *sch = to_subchannel(cdev->dev.parent);
142 struct ccw_request *req = &cdev->private->req;
143 int rc;
144
145 if (req->done)
146 return 1;
147 req->cancel = 1;
148 rc = cio_clear(sch);
149 if (rc)
150 ccwreq_stop(cdev, rc);
151 return 0;
152}
153
154/*
155 * Return the status of the internal I/O started on the specified ccw device.
156 * Perform BASIC SENSE if required.
157 */
158static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
159{
160 struct irb *irb = &cdev->private->irb;
161 struct cmd_scsw *scsw = &irb->scsw.cmd;
162
163 /* Perform BASIC SENSE if needed. */
164 if (ccw_device_accumulate_and_sense(cdev, lcirb))
165 return IO_RUNNING;
166 /* Check for halt/clear interrupt. */
167 if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
168 return IO_KILLED;
169 /* Check for path error. */
170 if (scsw->cc == 3 || scsw->pno)
171 return IO_PATH_ERROR;
172 /* Handle BASIC SENSE data. */
173 if (irb->esw.esw0.erw.cons) {
174 CIO_TRACE_EVENT(2, "sensedata");
175 CIO_HEX_EVENT(2, &cdev->private->dev_id,
176 sizeof(struct ccw_dev_id));
177 CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
178 /* Check for command reject. */
179 if (irb->ecw[0] & SNS0_CMD_REJECT)
180 return IO_REJECTED;
181 /* Assume that unexpected SENSE data implies an error. */
182 return IO_STATUS_ERROR;
183 }
184 /* Check for channel errors. */
185 if (scsw->cstat != 0)
186 return IO_STATUS_ERROR;
187 /* Check for device errors. */
188 if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
189 return IO_STATUS_ERROR;
190 /* Check for final state. */
191 if (!(scsw->dstat & DEV_STAT_DEV_END))
192 return IO_RUNNING;
193 /* Check for other improper status. */
194 if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
195 return IO_STATUS_ERROR;
196 return IO_DONE;
197}
198
199/*
200 * Log ccw request status.
201 */
202static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
203{
204 struct ccw_request *req = &cdev->private->req;
205 struct {
206 struct ccw_dev_id dev_id;
207 u16 retries;
208 u8 lpm;
209 u8 status;
210 } __attribute__ ((packed)) data;
211 data.dev_id = cdev->private->dev_id;
212 data.retries = req->retries;
213 data.lpm = (u8) req->mask;
214 data.status = (u8) status;
215 CIO_TRACE_EVENT(2, "reqstat");
216 CIO_HEX_EVENT(2, &data, sizeof(data));
217}
218
219/**
220 * ccw_request_handler - interrupt handler for I/O request procedure.
221 * @cdev: ccw device
222 *
223 * Handle interrupt during I/O request procedure.
224 */
225void ccw_request_handler(struct ccw_device *cdev)
226{
227 struct irb *irb = (struct irb *)&S390_lowcore.irb;
228 struct ccw_request *req = &cdev->private->req;
229 enum io_status status;
230 int rc = -EOPNOTSUPP;
231
232 /* Check status of I/O request. */
233 status = ccwreq_status(cdev, irb);
234 if (req->filter)
235 status = req->filter(cdev, req->data, irb, status);
236 if (status != IO_RUNNING)
237 ccw_device_set_timeout(cdev, 0);
238 if (status != IO_DONE && status != IO_RUNNING)
239 ccwreq_log_status(cdev, status);
240 switch (status) {
241 case IO_DONE:
242 break;
243 case IO_RUNNING:
244 return;
245 case IO_REJECTED:
246 goto err;
247 case IO_PATH_ERROR:
248 goto out_next_path;
249 case IO_STATUS_ERROR:
250 goto out_restart;
251 case IO_KILLED:
252 /* Check if request was cancelled on purpose. */
253 if (req->cancel) {
254 rc = -EIO;
255 goto err;
256 }
257 goto out_restart;
258 }
259 /* Check back with request initiator. */
260 if (!req->check)
261 goto out;
262 switch (req->check(cdev, req->data)) {
263 case 0:
264 break;
265 case -EAGAIN:
266 goto out_restart;
267 case -EACCES:
268 goto out_next_path;
269 default:
270 goto err;
271 }
272out:
273 ccwreq_stop(cdev, 0);
274 return;
275
276out_next_path:
277 /* Try next path and restart I/O. */
278 if (!ccwreq_next_path(cdev)) {
279 rc = -EACCES;
280 goto err;
281 }
282out_restart:
283 /* Restart. */
284 ccwreq_do(cdev);
285 return;
286err:
287 ccwreq_stop(cdev, rc);
288}
289
290
291/**
292 * ccw_request_timeout - timeout handler for I/O request procedure
293 * @cdev: ccw device
294 *
295 * Handle timeout during I/O request procedure.
296 */
297void ccw_request_timeout(struct ccw_device *cdev)
298{
299 struct subchannel *sch = to_subchannel(cdev->dev.parent);
300 struct ccw_request *req = &cdev->private->req;
301 int rc;
302
303 if (!ccwreq_next_path(cdev)) {
304 /* set the final return code for this request */
305 req->drc = -ETIME;
306 }
307 rc = cio_clear(sch);
308 if (rc)
309 goto err;
310 return;
311
312err:
313 ccwreq_stop(cdev, rc);
314}
315
316/**
317 * ccw_request_notoper - notoper handler for I/O request procedure
318 * @cdev: ccw device
319 *
320 * Handle a device-not-operational event during the I/O request procedure.
321 */
322void ccw_request_notoper(struct ccw_device *cdev)
323{
324 ccwreq_stop(cdev, -ENODEV);
325}
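Taken together, the new ccwreq.c gives internal I/O users (sense ID, path grouping) a small request engine: the caller fills cdev->private->req and hands control to ccw_request_start(), which walks the path mask via lpm_adjust(). The initial mask 0x8080 makes each of the eight channel paths eligible twice, matching the "counter link flapping" comment; for example, lpm_adjust(0x80, 0x28) shifts 0x80 -> 0x40 -> 0x20 and returns 0x20, the first path present in the mask. A hedged sketch of a caller follows (the req fields are the ones used above; the setup, timeout and lpm values are illustrative):

/* Assumes the same includes as ccwreq.c (<asm/ccwdev.h>, "io_sch.h")
 * and that the caller holds the ccw device lock, as the handlers above
 * expect.
 */
static void my_callback(struct ccw_device *cdev, void *data, int rc)
{
	/* Called exactly once: rc is 0 on success, req->drc if a
	 * deferred return code was set, or a permanent error code. */
}

static void start_internal_request(struct ccw_device *cdev, struct ccw1 *cp)
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->cp		= cp;		/* prepared channel program */
	req->timeout	= 10 * HZ;	/* per-start timeout (example) */
	req->maxretries	= 5;		/* retries per path (example) */
	req->lpm	= 0x80;		/* usable paths; normally pam & opm */
	req->callback	= my_callback;
	ccw_request_start(cdev);	/* mask/retries/done are reset here */
}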
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 8ab51608da55..1d16189f2f2d 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -15,6 +15,7 @@
15#include <linux/wait.h> 15#include <linux/wait.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/slab.h>
18#include <asm/chpid.h> 19#include <asm/chpid.h>
19#include <asm/sclp.h> 20#include <asm/sclp.h>
20#include <asm/crw.h> 21#include <asm/crw.h>
@@ -65,7 +66,7 @@ static void set_chp_logically_online(struct chp_id chpid, int onoff)
65 chpid_to_chp(chpid)->state = onoff; 66 chpid_to_chp(chpid)->state = onoff;
66} 67}
67 68
68/* On succes return 0 if channel-path is varied offline, 1 if it is varied 69/* On success return 0 if channel-path is varied offline, 1 if it is varied
69 * online. Return -ENODEV if channel-path is not registered. */ 70 * online. Return -ENODEV if channel-path is not registered. */
70int chp_get_status(struct chp_id chpid) 71int chp_get_status(struct chp_id chpid)
71{ 72{
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ecd3e567648..ce7cb87479fe 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -29,6 +29,7 @@
29#include "chsc.h" 29#include "chsc.h"
30 30
31static void *sei_page; 31static void *sei_page;
32static DEFINE_SPINLOCK(sda_lock);
32 33
33/** 34/**
34 * chsc_error_from_response() - convert a chsc response to an error 35 * chsc_error_from_response() - convert a chsc response to an error
@@ -574,7 +575,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
574 secm_area->request.length = 0x0050; 575 secm_area->request.length = 0x0050;
575 secm_area->request.code = 0x0016; 576 secm_area->request.code = 0x0016;
576 577
577 secm_area->key = PAGE_DEFAULT_KEY; 578 secm_area->key = PAGE_DEFAULT_KEY >> 4;
578 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; 579 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
579 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; 580 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
580 581
@@ -832,11 +833,10 @@ void __init chsc_free_sei_area(void)
832 kfree(sei_page); 833 kfree(sei_page);
833} 834}
834 835
835int __init 836int chsc_enable_facility(int operation_code)
836chsc_enable_facility(int operation_code)
837{ 837{
838 int ret; 838 int ret;
839 struct { 839 static struct {
840 struct chsc_header request; 840 struct chsc_header request;
841 u8 reserved1:4; 841 u8 reserved1:4;
842 u8 format:4; 842 u8 format:4;
@@ -849,33 +849,32 @@ chsc_enable_facility(int operation_code)
849 u32 reserved5:4; 849 u32 reserved5:4;
850 u32 format2:4; 850 u32 format2:4;
851 u32 reserved6:24; 851 u32 reserved6:24;
852 } __attribute__ ((packed)) *sda_area; 852 } __attribute__ ((packed, aligned(4096))) sda_area;
853 853
854 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); 854 spin_lock(&sda_lock);
855 if (!sda_area) 855 memset(&sda_area, 0, sizeof(sda_area));
856 return -ENOMEM; 856 sda_area.request.length = 0x0400;
857 sda_area->request.length = 0x0400; 857 sda_area.request.code = 0x0031;
858 sda_area->request.code = 0x0031; 858 sda_area.operation_code = operation_code;
859 sda_area->operation_code = operation_code;
860 859
861 ret = chsc(sda_area); 860 ret = chsc(&sda_area);
862 if (ret > 0) { 861 if (ret > 0) {
863 ret = (ret == 3) ? -ENODEV : -EBUSY; 862 ret = (ret == 3) ? -ENODEV : -EBUSY;
864 goto out; 863 goto out;
865 } 864 }
866 865
867 switch (sda_area->response.code) { 866 switch (sda_area.response.code) {
868 case 0x0101: 867 case 0x0101:
869 ret = -EOPNOTSUPP; 868 ret = -EOPNOTSUPP;
870 break; 869 break;
871 default: 870 default:
872 ret = chsc_error_from_response(sda_area->response.code); 871 ret = chsc_error_from_response(sda_area.response.code);
873 } 872 }
874 if (ret != 0) 873 if (ret != 0)
875 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", 874 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
876 operation_code, sda_area->response.code); 875 operation_code, sda_area.response.code);
877 out: 876 out:
878 free_page((unsigned long)sda_area); 877 spin_unlock(&sda_lock);
879 return ret; 878 return ret;
880} 879}
881 880
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index cc5144b6f9d9..3b6f4adc5094 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -7,11 +7,13 @@
7 * 7 *
8 */ 8 */
9 9
10#include <linux/slab.h>
10#include <linux/device.h> 11#include <linux/device.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/uaccess.h> 13#include <linux/uaccess.h>
13#include <linux/miscdevice.h> 14#include <linux/miscdevice.h>
14 15
16#include <asm/compat.h>
15#include <asm/cio.h> 17#include <asm/cio.h>
16#include <asm/chsc.h> 18#include <asm/chsc.h>
17#include <asm/isc.h> 19#include <asm/isc.h>
@@ -50,7 +52,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
50{ 52{
51 struct chsc_private *private = sch->private; 53 struct chsc_private *private = sch->private;
52 struct chsc_request *request = private->request; 54 struct chsc_request *request = private->request;
53 struct irb *irb = (struct irb *)__LC_IRB; 55 struct irb *irb = (struct irb *)&S390_lowcore.irb;
54 56
55 CHSC_LOG(4, "irb"); 57 CHSC_LOG(4, "irb");
56 CHSC_LOG_HEX(4, irb, sizeof(*irb)); 58 CHSC_LOG_HEX(4, irb, sizeof(*irb));
@@ -122,7 +124,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch)
122 * since we don't have a way to clear the subchannel and 124 * since we don't have a way to clear the subchannel and
123 * cannot disable it with a request running. 125 * cannot disable it with a request running.
124 */ 126 */
125 cc = stsch(sch->schid, &schib); 127 cc = stsch_err(sch->schid, &schib);
126 if (!cc && scsw_stctl(&schib.scsw)) 128 if (!cc && scsw_stctl(&schib.scsw))
127 return -EAGAIN; 129 return -EAGAIN;
128 return 0; 130 return 0;
@@ -236,7 +238,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
236 int ret = -ENODEV; 238 int ret = -ENODEV;
237 char dbf[10]; 239 char dbf[10];
238 240
239 chsc_area->header.key = PAGE_DEFAULT_KEY; 241 chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
240 while ((sch = chsc_get_next_subchannel(sch))) { 242 while ((sch = chsc_get_next_subchannel(sch))) {
241 spin_lock(sch->lock); 243 spin_lock(sch->lock);
242 private = sch->private; 244 private = sch->private;
@@ -770,24 +772,30 @@ out_free:
770static long chsc_ioctl(struct file *filp, unsigned int cmd, 772static long chsc_ioctl(struct file *filp, unsigned int cmd,
771 unsigned long arg) 773 unsigned long arg)
772{ 774{
775 void __user *argp;
776
773 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd); 777 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
778 if (is_compat_task())
779 argp = compat_ptr(arg);
780 else
781 argp = (void __user *)arg;
774 switch (cmd) { 782 switch (cmd) {
775 case CHSC_START: 783 case CHSC_START:
776 return chsc_ioctl_start((void __user *)arg); 784 return chsc_ioctl_start(argp);
777 case CHSC_INFO_CHANNEL_PATH: 785 case CHSC_INFO_CHANNEL_PATH:
778 return chsc_ioctl_info_channel_path((void __user *)arg); 786 return chsc_ioctl_info_channel_path(argp);
779 case CHSC_INFO_CU: 787 case CHSC_INFO_CU:
780 return chsc_ioctl_info_cu((void __user *)arg); 788 return chsc_ioctl_info_cu(argp);
781 case CHSC_INFO_SCH_CU: 789 case CHSC_INFO_SCH_CU:
782 return chsc_ioctl_info_sch_cu((void __user *)arg); 790 return chsc_ioctl_info_sch_cu(argp);
783 case CHSC_INFO_CI: 791 case CHSC_INFO_CI:
784 return chsc_ioctl_conf_info((void __user *)arg); 792 return chsc_ioctl_conf_info(argp);
785 case CHSC_INFO_CCL: 793 case CHSC_INFO_CCL:
786 return chsc_ioctl_conf_comp_list((void __user *)arg); 794 return chsc_ioctl_conf_comp_list(argp);
787 case CHSC_INFO_CPD: 795 case CHSC_INFO_CPD:
788 return chsc_ioctl_chpd((void __user *)arg); 796 return chsc_ioctl_chpd(argp);
789 case CHSC_INFO_DCAL: 797 case CHSC_INFO_DCAL:
790 return chsc_ioctl_dcal((void __user *)arg); 798 return chsc_ioctl_dcal(argp);
791 default: /* unknown ioctl number */ 799 default: /* unknown ioctl number */
792 return -ENOIOCTLCMD; 800 return -ENOIOCTLCMD;
793 } 801 }
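The chsc ioctl change above is the standard compat pattern: a 31-bit user space hands the kernel a 32-bit pointer, which must be widened with compat_ptr() before it can be used as a void __user * on a 64-bit kernel, and checking is_compat_task() per call lets a single unlocked_ioctl serve both ABIs. The same idiom in isolation (hedged sketch, illustrative names):

#include <asm/compat.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static long foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	void __user *argp;
	int val;

	/* Widen the user pointer correctly for compat callers. */
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *) arg;

	if (copy_from_user(&val, argp, sizeof(val)))
		return -EFAULT;
	/* ... dispatch on cmd using argp ... */
	return 0;
}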
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 138124fcfcad..5feea1a371e1 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -361,7 +361,7 @@ int cio_commit_config(struct subchannel *sch)
361 struct schib schib; 361 struct schib schib;
362 int ccode, retry, ret = 0; 362 int ccode, retry, ret = 0;
363 363
364 if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) 364 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
365 return -ENODEV; 365 return -ENODEV;
366 366
367 for (retry = 0; retry < 5; retry++) { 367 for (retry = 0; retry < 5; retry++) {
@@ -372,7 +372,7 @@ int cio_commit_config(struct subchannel *sch)
372 return ccode; 372 return ccode;
373 switch (ccode) { 373 switch (ccode) {
374 case 0: /* successful */ 374 case 0: /* successful */
375 if (stsch(sch->schid, &schib) || 375 if (stsch_err(sch->schid, &schib) ||
376 !css_sch_is_valid(&schib)) 376 !css_sch_is_valid(&schib))
377 return -ENODEV; 377 return -ENODEV;
378 if (cio_check_config(sch, &schib)) { 378 if (cio_check_config(sch, &schib)) {
@@ -404,7 +404,7 @@ int cio_update_schib(struct subchannel *sch)
404{ 404{
405 struct schib schib; 405 struct schib schib;
406 406
407 if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) 407 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
408 return -ENODEV; 408 return -ENODEV;
409 409
410 memcpy(&sch->schib, &schib, sizeof(schib)); 410 memcpy(&sch->schib, &schib, sizeof(schib));
@@ -618,14 +618,15 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
618 old_regs = set_irq_regs(regs); 618 old_regs = set_irq_regs(regs);
619 s390_idle_check(); 619 s390_idle_check();
620 irq_enter(); 620 irq_enter();
621 __get_cpu_var(s390_idle).nohz_delay = 1;
621 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 622 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
622 /* Serve timer interrupts first. */ 623 /* Serve timer interrupts first. */
623 clock_comparator_work(); 624 clock_comparator_work();
624 /* 625 /*
625 * Get interrupt information from lowcore 626 * Get interrupt information from lowcore
626 */ 627 */
627 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 628 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
628 irb = (struct irb *) __LC_IRB; 629 irb = (struct irb *)&S390_lowcore.irb;
629 do { 630 do {
630 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; 631 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
631 /* 632 /*
@@ -660,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
660 * We don't do this for VM because a tpi drops the cpu 661 * We don't do this for VM because a tpi drops the cpu
661 * out of the sie which costs more cycles than it saves. 662 * out of the sie which costs more cycles than it saves.
662 */ 663 */
663 } while (!MACHINE_IS_VM && tpi (NULL) != 0); 664 } while (MACHINE_IS_LPAR && tpi(NULL) != 0);
664 irq_exit(); 665 irq_exit();
665 set_irq_regs(old_regs); 666 set_irq_regs(old_regs);
666} 667}
@@ -681,10 +682,10 @@ static int cio_tpi(void)
681 struct irb *irb; 682 struct irb *irb;
682 int irq_context; 683 int irq_context;
683 684
684 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 685 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
685 if (tpi(NULL) != 1) 686 if (tpi(NULL) != 1)
686 return 0; 687 return 0;
687 irb = (struct irb *) __LC_IRB; 688 irb = (struct irb *)&S390_lowcore.irb;
688 /* Store interrupt response block to lowcore. */ 689 /* Store interrupt response block to lowcore. */
689 if (tsch(tpi_info->schid, irb) != 0) 690 if (tsch(tpi_info->schid, irb) != 0)
690 /* Not status pending or not operational. */ 691 /* Not status pending or not operational. */
@@ -770,7 +771,7 @@ cio_get_console_sch_no(void)
770 if (console_irq != -1) { 771 if (console_irq != -1) {
771 /* VM provided us with the irq number of the console. */ 772 /* VM provided us with the irq number of the console. */
772 schid.sch_no = console_irq; 773 schid.sch_no = console_irq;
773 if (stsch(schid, &console_subchannel.schib) != 0 || 774 if (stsch_err(schid, &console_subchannel.schib) != 0 ||
774 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || 775 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
775 !console_subchannel.schib.pmcw.dnv) 776 !console_subchannel.schib.pmcw.dnv)
776 return -1; 777 return -1;
@@ -862,10 +863,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
862 cc = 0; 863 cc = 0;
863 for (retry=0;retry<3;retry++) { 864 for (retry=0;retry<3;retry++) {
864 schib->pmcw.ena = 0; 865 schib->pmcw.ena = 0;
865 cc = msch(schid, schib); 866 cc = msch_err(schid, schib);
866 if (cc) 867 if (cc)
867 return (cc==3?-ENODEV:-EBUSY); 868 return (cc==3?-ENODEV:-EBUSY);
868 if (stsch(schid, schib) || !css_sch_is_valid(schib)) 869 if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
869 return -ENODEV; 870 return -ENODEV;
870 if (!schib->pmcw.ena) 871 if (!schib->pmcw.ena)
871 return 0; 872 return 0;
@@ -884,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
884 struct tpi_info ti; 885 struct tpi_info ti;
885 886
886 if (tpi(&ti)) { 887 if (tpi(&ti)) {
887 tsch(ti.schid, (struct irb *)__LC_IRB); 888 tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
888 if (schid_equal(&ti.schid, &schid)) 889 if (schid_equal(&ti.schid, &schid))
889 return 0; 890 return 0;
890 } 891 }
@@ -912,7 +913,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr)
912 913
913 pgm_check_occured = 0; 914 pgm_check_occured = 0;
914 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; 915 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
915 rc = stsch(schid, addr); 916 rc = stsch_err(schid, addr);
916 s390_base_pgm_handler_fn = NULL; 917 s390_base_pgm_handler_fn = NULL;
917 918
918 /* The program check handler could have changed pgm_check_occured. */ 919 /* The program check handler could have changed pgm_check_occured. */
@@ -949,7 +950,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
949 /* No default clear strategy */ 950 /* No default clear strategy */
950 break; 951 break;
951 } 952 }
952 stsch(schid, &schib); 953 stsch_err(schid, &schib);
953 __disable_subchannel_easy(schid, &schib); 954 __disable_subchannel_easy(schid, &schib);
954 } 955 }
955out: 956out:
@@ -1082,10 +1083,10 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1082 struct subchannel_id schid; 1083 struct subchannel_id schid;
1083 struct schib schib; 1084 struct schib schib;
1084 1085
1085 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1086 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
1086 if (!schid.one) 1087 if (!schid.one)
1087 return -ENODEV; 1088 return -ENODEV;
1088 if (stsch(schid, &schib)) 1089 if (stsch_err(schid, &schib))
1089 return -ENODEV; 1090 return -ENODEV;
1090 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) 1091 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
1091 return -ENODEV; 1092 return -ENODEV;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 2e43558c704b..bf7f80f5a330 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,6 +68,11 @@ struct schib {
68 __u8 mda[4]; /* model dependent area */ 68 __u8 mda[4]; /* model dependent area */
69} __attribute__ ((packed,aligned(4))); 69} __attribute__ ((packed,aligned(4)));
70 70
71enum sch_todo {
72 SCH_TODO_NOTHING,
73 SCH_TODO_UNREG,
74};
75
71/* subchannel data structure used by I/O subroutines */ 76/* subchannel data structure used by I/O subroutines */
72struct subchannel { 77struct subchannel {
73 struct subchannel_id schid; 78 struct subchannel_id schid;
@@ -95,7 +100,8 @@ struct subchannel {
95 struct device dev; /* entry in device tree */ 100 struct device dev; /* entry in device tree */
96 struct css_driver *driver; 101 struct css_driver *driver;
97 void *private; /* private per subchannel type data */ 102 void *private; /* private per subchannel type data */
98 struct work_struct work; 103 enum sch_todo todo;
104 struct work_struct todo_work;
99 struct schib_config config; 105 struct schib_config config;
100} __attribute__ ((aligned(8))); 106} __attribute__ ((aligned(8)));
101 107
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 30f516111307..2985eb439485 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -462,7 +462,7 @@ static struct cmb_area cmb_area = {
462 * block of memory, which can not be moved as long as any channel 462 * block of memory, which can not be moved as long as any channel
463 * is active. Therefore, a maximum number of subchannels needs to 463 * is active. Therefore, a maximum number of subchannels needs to
464 * be defined somewhere. This is a module parameter, defaulting to 464 * be defined somewhere. This is a module parameter, defaulting to
465 * a resonable value of 1024, or 32 kb of memory. 465 * a reasonable value of 1024, or 32 kb of memory.
466 * Current kernels don't allow kmalloc with more than 128kb, so the 466 * Current kernels don't allow kmalloc with more than 128kb, so the
467 * maximum is 4096. 467 * maximum is 4096.
468 */ 468 */
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d157665d0e76..425f741a280c 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -8,15 +8,16 @@
8 * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */ 9 */
10 10
11#include <linux/semaphore.h>
12#include <linux/mutex.h> 11#include <linux/mutex.h>
13#include <linux/kthread.h> 12#include <linux/kthread.h>
14#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/wait.h>
15#include <asm/crw.h> 15#include <asm/crw.h>
16 16
17static struct semaphore crw_semaphore;
18static DEFINE_MUTEX(crw_handler_mutex); 17static DEFINE_MUTEX(crw_handler_mutex);
19static crw_handler_t crw_handlers[NR_RSCS]; 18static crw_handler_t crw_handlers[NR_RSCS];
19static atomic_t crw_nr_req = ATOMIC_INIT(0);
20static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
20 21
21/** 22/**
22 * crw_register_handler() - register a channel report word handler 23 * crw_register_handler() - register a channel report word handler
@@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc)
59static int crw_collect_info(void *unused) 60static int crw_collect_info(void *unused)
60{ 61{
61 struct crw crw[2]; 62 struct crw crw[2];
62 int ccode; 63 int ccode, signal;
63 unsigned int chain; 64 unsigned int chain;
64 int ignore;
65 65
66repeat: 66repeat:
67 ignore = down_interruptible(&crw_semaphore); 67 signal = wait_event_interruptible(crw_handler_wait_q,
68 atomic_read(&crw_nr_req) > 0);
69 if (unlikely(signal))
70 atomic_inc(&crw_nr_req);
68 chain = 0; 71 chain = 0;
69 while (1) { 72 while (1) {
70 crw_handler_t handler; 73 crw_handler_t handler;
@@ -122,25 +125,23 @@ repeat:
122 /* chain is always 0 or 1 here. */ 125 /* chain is always 0 or 1 here. */
123 chain = crw[chain].chn ? chain + 1 : 0; 126 chain = crw[chain].chn ? chain + 1 : 0;
124 } 127 }
128 if (atomic_dec_and_test(&crw_nr_req))
129 wake_up(&crw_handler_wait_q);
125 goto repeat; 130 goto repeat;
126 return 0; 131 return 0;
127} 132}
128 133
129void crw_handle_channel_report(void) 134void crw_handle_channel_report(void)
130{ 135{
131 up(&crw_semaphore); 136 atomic_inc(&crw_nr_req);
137 wake_up(&crw_handler_wait_q);
132} 138}
133 139
134/* 140void crw_wait_for_channel_report(void)
135 * Separate initcall needed for semaphore initialization since
136 * crw_handle_channel_report might be called before crw_machine_check_init.
137 */
138static int __init crw_init_semaphore(void)
139{ 141{
140 init_MUTEX_LOCKED(&crw_semaphore); 142 crw_handle_channel_report();
141 return 0; 143 wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
142} 144}
143pure_initcall(crw_init_semaphore);
144 145
145/* 146/*
146 * Machine checks for the channel subsystem must be enabled 147 * Machine checks for the channel subsystem must be enabled
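The crw.c conversion above replaces the counting semaphore with an atomic counter plus wait queue. The producer side is functionally unchanged (crw_handle_channel_report() still just signals work), but the new scheme supports something a semaphore cannot: crw_wait_for_channel_report() can wait for the count to drain back to zero, i.e. for every queued channel report to be fully processed, which the new /proc/cio_settle interface relies on. The producer/consumer/flush trio in isolation (hedged sketch):

#include <asm/atomic.h>
#include <linux/wait.h>

static atomic_t nr_req = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(wait_q);

static void produce(void)		/* machine-check context */
{
	atomic_inc(&nr_req);
	wake_up(&wait_q);
}

static void consume_pass(void)		/* kthread body, one iteration */
{
	wait_event(wait_q, atomic_read(&nr_req) > 0);
	/* ... drain all pending reports ... */
	if (atomic_dec_and_test(&nr_req))
		wake_up(&wait_q);	/* unblock flush() waiters */
}

static void flush(void)			/* e.g. the cio_settle path */
{
	produce();			/* force at least one full pass */
	wait_event(wait_q, atomic_read(&nr_req) == 0);
}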
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 91c25706fa83..511649115bd7 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,6 +18,7 @@
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/suspend.h> 20#include <linux/suspend.h>
21#include <linux/proc_fs.h>
21#include <asm/isc.h> 22#include <asm/isc.h>
22#include <asm/crw.h> 23#include <asm/crw.h>
23 24
@@ -133,6 +134,8 @@ out:
133 return rc; 134 return rc;
134} 135}
135 136
137static void css_sch_todo(struct work_struct *work);
138
136static struct subchannel * 139static struct subchannel *
137css_alloc_subchannel(struct subchannel_id schid) 140css_alloc_subchannel(struct subchannel_id schid)
138{ 141{
@@ -147,6 +150,7 @@ css_alloc_subchannel(struct subchannel_id schid)
147 kfree(sch); 150 kfree(sch);
148 return ERR_PTR(ret); 151 return ERR_PTR(ret);
149 } 152 }
153 INIT_WORK(&sch->todo_work, css_sch_todo);
150 return sch; 154 return sch;
151} 155}
152 156
@@ -190,6 +194,51 @@ void css_sch_device_unregister(struct subchannel *sch)
190} 194}
191EXPORT_SYMBOL_GPL(css_sch_device_unregister); 195EXPORT_SYMBOL_GPL(css_sch_device_unregister);
192 196
197static void css_sch_todo(struct work_struct *work)
198{
199 struct subchannel *sch;
200 enum sch_todo todo;
201
202 sch = container_of(work, struct subchannel, todo_work);
203 /* Find out todo. */
204 spin_lock_irq(sch->lock);
205 todo = sch->todo;
206 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
207 sch->schid.sch_no, todo);
208 sch->todo = SCH_TODO_NOTHING;
209 spin_unlock_irq(sch->lock);
210 /* Perform todo. */
211 if (todo == SCH_TODO_UNREG)
212 css_sch_device_unregister(sch);
213 /* Release workqueue ref. */
214 put_device(&sch->dev);
215}
216
217/**
218 * css_sched_sch_todo - schedule a subchannel operation
219 * @sch: subchannel
220 * @todo: operation to perform
221 *
222 * Schedule the operation identified by @todo to be performed on the slow path
223 * workqueue. Do nothing if another operation of equal or higher priority is
224 * already scheduled. Needs to be called with the subchannel lock held.
225 */
226void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
227{
228 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
229 sch->schid.ssid, sch->schid.sch_no, todo);
230 if (sch->todo >= todo)
231 return;
232 /* Get workqueue ref. */
233 if (!get_device(&sch->dev))
234 return;
235 sch->todo = todo;
236 if (!queue_work(cio_work_q, &sch->todo_work)) {
237 /* Already queued, release workqueue ref. */
238 put_device(&sch->dev);
239 }
240}
241
193static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 242static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
194{ 243{
195 int i; 244 int i;
@@ -376,8 +425,8 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
376 /* Unusable - ignore. */ 425 /* Unusable - ignore. */
377 return 0; 426 return 0;
378 } 427 }
379 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, " 428 CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
380 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER); 429 schid.sch_no);
381 430
382 return css_probe_device(schid); 431 return css_probe_device(schid);
383} 432}
@@ -394,6 +443,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
394 "Got subchannel machine check but " 443 "Got subchannel machine check but "
395 "no sch_event handler provided.\n"); 444 "no sch_event handler provided.\n");
396 } 445 }
446 if (ret != 0 && ret != -EAGAIN) {
447 CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
448 sch->schid.ssid, sch->schid.sch_no, ret);
449 }
397 return ret; 450 return ret;
398} 451}
399 452
@@ -491,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused)
491} 544}
492 545
493static DECLARE_WORK(slow_path_work, css_slow_path_func); 546static DECLARE_WORK(slow_path_work, css_slow_path_func);
494struct workqueue_struct *slow_path_wq; 547struct workqueue_struct *cio_work_q;
495 548
496void css_schedule_eval(struct subchannel_id schid) 549void css_schedule_eval(struct subchannel_id schid)
497{ 550{
@@ -500,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid)
500 spin_lock_irqsave(&slow_subchannel_lock, flags); 553 spin_lock_irqsave(&slow_subchannel_lock, flags);
501 idset_sch_add(slow_subchannel_set, schid); 554 idset_sch_add(slow_subchannel_set, schid);
502 atomic_set(&css_eval_scheduled, 1); 555 atomic_set(&css_eval_scheduled, 1);
503 queue_work(slow_path_wq, &slow_path_work); 556 queue_work(cio_work_q, &slow_path_work);
504 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 557 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
505} 558}
506 559
@@ -511,7 +564,7 @@ void css_schedule_eval_all(void)
511 spin_lock_irqsave(&slow_subchannel_lock, flags); 564 spin_lock_irqsave(&slow_subchannel_lock, flags);
512 idset_fill(slow_subchannel_set); 565 idset_fill(slow_subchannel_set);
513 atomic_set(&css_eval_scheduled, 1); 566 atomic_set(&css_eval_scheduled, 1);
514 queue_work(slow_path_wq, &slow_path_work); 567 queue_work(cio_work_q, &slow_path_work);
515 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 568 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
516} 569}
517 570
@@ -542,14 +595,14 @@ void css_schedule_eval_all_unreg(void)
542 spin_lock_irqsave(&slow_subchannel_lock, flags); 595 spin_lock_irqsave(&slow_subchannel_lock, flags);
543 idset_add_set(slow_subchannel_set, unreg_set); 596 idset_add_set(slow_subchannel_set, unreg_set);
544 atomic_set(&css_eval_scheduled, 1); 597 atomic_set(&css_eval_scheduled, 1);
545 queue_work(slow_path_wq, &slow_path_work); 598 queue_work(cio_work_q, &slow_path_work);
546 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 599 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
547 idset_free(unreg_set); 600 idset_free(unreg_set);
548} 601}
549 602
550void css_wait_for_slow_path(void) 603void css_wait_for_slow_path(void)
551{ 604{
552 flush_workqueue(slow_path_wq); 605 flush_workqueue(cio_work_q);
553} 606}
554 607
555/* Schedule reprobing of all unregistered subchannels. */ 608/* Schedule reprobing of all unregistered subchannels. */
@@ -684,6 +737,7 @@ static int __init setup_css(int nr)
684 css->pseudo_subchannel->dev.parent = &css->device; 737 css->pseudo_subchannel->dev.parent = &css->device;
685 css->pseudo_subchannel->dev.release = css_subchannel_release; 738 css->pseudo_subchannel->dev.release = css_subchannel_release;
686 dev_set_name(&css->pseudo_subchannel->dev, "defunct"); 739 dev_set_name(&css->pseudo_subchannel->dev, "defunct");
740 mutex_init(&css->pseudo_subchannel->reg_mutex);
687 ret = cio_create_sch_lock(css->pseudo_subchannel); 741 ret = cio_create_sch_lock(css->pseudo_subchannel);
688 if (ret) { 742 if (ret) {
689 kfree(css->pseudo_subchannel); 743 kfree(css->pseudo_subchannel);
@@ -816,15 +870,10 @@ static int __init css_bus_init(void)
816 870
817 /* Try to enable MSS. */ 871 /* Try to enable MSS. */
818 ret = chsc_enable_facility(CHSC_SDA_OC_MSS); 872 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
819 switch (ret) { 873 if (ret)
820 case 0: /* Success. */
821 max_ssid = __MAX_SSID;
822 break;
823 case -ENOMEM:
824 goto out;
825 default:
826 max_ssid = 0; 874 max_ssid = 0;
827 } 875 else /* Success. */
876 max_ssid = __MAX_SSID;
828 877
829 ret = slow_subchannel_init(); 878 ret = slow_subchannel_init();
830 if (ret) 879 if (ret)
@@ -939,12 +988,21 @@ static int __init channel_subsystem_init(void)
939 ret = css_bus_init(); 988 ret = css_bus_init();
940 if (ret) 989 if (ret)
941 return ret; 990 return ret;
942 991 cio_work_q = create_singlethread_workqueue("cio");
992 if (!cio_work_q) {
993 ret = -ENOMEM;
994 goto out_bus;
995 }
943 ret = io_subchannel_init(); 996 ret = io_subchannel_init();
944 if (ret) 997 if (ret)
945 css_bus_cleanup(); 998 goto out_wq;
946 999
947 return ret; 1000 return ret;
1001out_wq:
1002 destroy_workqueue(cio_work_q);
1003out_bus:
1004 css_bus_cleanup();
1005 return ret;
948} 1006}
949subsys_initcall(channel_subsystem_init); 1007subsys_initcall(channel_subsystem_init);
950 1008
@@ -953,10 +1011,25 @@ static int css_settle(struct device_driver *drv, void *unused)
953 struct css_driver *cssdrv = to_cssdriver(drv); 1011 struct css_driver *cssdrv = to_cssdriver(drv);
954 1012
955 if (cssdrv->settle) 1013 if (cssdrv->settle)
956 cssdrv->settle(); 1014 return cssdrv->settle();
957 return 0; 1015 return 0;
958} 1016}
959 1017
1018int css_complete_work(void)
1019{
1020 int ret;
1021
1022 /* Wait for the evaluation of subchannels to finish. */
1023 ret = wait_event_interruptible(css_eval_wq,
1024 atomic_read(&css_eval_scheduled) == 0);
1025 if (ret)
1026 return -EINTR;
1027 flush_workqueue(cio_work_q);
1028 /* Wait for the subchannel type specific initialization to finish */
1029 return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1030}
1031
1032
960/* 1033/*
961 * Wait for the initialization of devices to finish, to make sure we are 1034 * Wait for the initialization of devices to finish, to make sure we are
962 * done with our setup if the search for the root device starts. 1035 * done with our setup if the search for the root device starts.
@@ -965,13 +1038,46 @@ static int __init channel_subsystem_init_sync(void)
965{ 1038{
966 /* Start initial subchannel evaluation. */ 1039 /* Start initial subchannel evaluation. */
967 css_schedule_eval_all(); 1040 css_schedule_eval_all();
968 /* Wait for the evaluation of subchannels to finish. */ 1041 css_complete_work();
969 wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); 1042 return 0;
970 /* Wait for the subchannel type specific initialization to finish */
971 return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
972} 1043}
973subsys_initcall_sync(channel_subsystem_init_sync); 1044subsys_initcall_sync(channel_subsystem_init_sync);
974 1045
1046void channel_subsystem_reinit(void)
1047{
1048 chsc_enable_facility(CHSC_SDA_OC_MSS);
1049}
1050
1051#ifdef CONFIG_PROC_FS
1052static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1053 size_t count, loff_t *ppos)
1054{
1055 int ret;
1056
1057 /* Handle pending CRW's. */
1058 crw_wait_for_channel_report();
1059 ret = css_complete_work();
1060
1061 return ret ? ret : count;
1062}
1063
1064static const struct file_operations cio_settle_proc_fops = {
1065 .write = cio_settle_write,
1066};
1067
1068static int __init cio_settle_init(void)
1069{
1070 struct proc_dir_entry *entry;
1071
1072 entry = proc_create("cio_settle", S_IWUSR, NULL,
1073 &cio_settle_proc_fops);
1074 if (!entry)
1075 return -ENOMEM;
1076 return 0;
1077}
1078device_initcall(cio_settle_init);
1079#endif /*CONFIG_PROC_FS*/
1080
975int sch_is_pseudo_sch(struct subchannel *sch) 1081int sch_is_pseudo_sch(struct subchannel *sch)
976{ 1082{
977 return sch == to_css(sch->dev.parent)->pseudo_subchannel; 1083 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
@@ -1095,7 +1201,7 @@ static int css_pm_restore(struct device *dev)
1095 return drv->restore ? drv->restore(sch) : 0; 1201 return drv->restore ? drv->restore(sch) : 0;
1096} 1202}
1097 1203
1098static struct dev_pm_ops css_pm_ops = { 1204static const struct dev_pm_ops css_pm_ops = {
1099 .prepare = css_pm_prepare, 1205 .prepare = css_pm_prepare,
1100 .complete = css_pm_complete, 1206 .complete = css_pm_complete,
1101 .freeze = css_pm_freeze, 1207 .freeze = css_pm_freeze,
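The css.c additions above introduce a serialized "todo" mechanism for deferred subchannel operations: css_sched_sch_todo() records the pending operation under the subchannel lock, pins the subchannel with get_device() so it cannot be released while the work item is queued, and drops the extra reference if the work was already queued; css_sch_todo() then reads and clears the todo under the lock, performs it, and puts the reference. A reduced sketch of this reference-counted deferral idiom (generic types, illustrative):

#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

enum todo { TODO_NOTHING, TODO_UNREG };

struct obj {
	struct device dev;
	spinlock_t lock;
	enum todo todo;
	struct work_struct todo_work;
};

/* Caller holds obj->lock. An operation of equal or higher priority
 * already pending wins; otherwise pin the object and queue the work.
 */
static void sched_todo(struct obj *o, enum todo todo,
		       struct workqueue_struct *wq)
{
	if (o->todo >= todo)
		return;
	if (!get_device(&o->dev))	/* object already being released */
		return;
	o->todo = todo;
	if (!queue_work(wq, &o->todo_work))
		put_device(&o->dev);	/* work already queued: drop extra ref */
}

static void todo_fn(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, todo_work);
	enum todo todo;

	spin_lock_irq(&o->lock);
	todo = o->todo;			/* snapshot and reset under lock */
	o->todo = TODO_NOTHING;
	spin_unlock_irq(&o->lock);
	/* ... perform todo (e.g. unregister for TODO_UNREG) ... */
	put_device(&o->dev);		/* release the queueing reference */
}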
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 68d6b0bf151c..7e37886de231 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -11,6 +11,8 @@
11#include <asm/chpid.h> 11#include <asm/chpid.h>
12#include <asm/schid.h> 12#include <asm/schid.h>
13 13
14#include "cio.h"
15
14/* 16/*
15 * path grouping stuff 17 * path grouping stuff
16 */ 18 */
@@ -93,7 +95,7 @@ struct css_driver {
93 int (*freeze)(struct subchannel *); 95 int (*freeze)(struct subchannel *);
94 int (*thaw) (struct subchannel *); 96 int (*thaw) (struct subchannel *);
95 int (*restore)(struct subchannel *); 97 int (*restore)(struct subchannel *);
96 void (*settle)(void); 98 int (*settle)(void);
97 const char *name; 99 const char *name;
98}; 100};
99 101
@@ -144,11 +146,13 @@ extern struct channel_subsystem *channel_subsystems[];
144/* Helper functions to build lists for the slow path. */ 146/* Helper functions to build lists for the slow path. */
145void css_schedule_eval(struct subchannel_id schid); 147void css_schedule_eval(struct subchannel_id schid);
146void css_schedule_eval_all(void); 148void css_schedule_eval_all(void);
149int css_complete_work(void);
147 150
148int sch_is_pseudo_sch(struct subchannel *); 151int sch_is_pseudo_sch(struct subchannel *);
149struct schib; 152struct schib;
150int css_sch_is_valid(struct schib *); 153int css_sch_is_valid(struct schib *);
151 154
152extern struct workqueue_struct *slow_path_wq; 155extern struct workqueue_struct *cio_work_q;
153void css_wait_for_slow_path(void); 156void css_wait_for_slow_path(void);
157void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
154#endif 158#endif
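Note: the header change turns settle() from a notification into a contract — implementations return 0 once all asynchronous device setup has finished, or -EINTR when their wait was interrupted — and publishes css_complete_work() and cio_work_q to subchannel-type drivers. A sketch of a conforming implementation (my_init_wq, my_init_count and the driver name are illustrative, not part of the patch):

static int my_settle(void)
{
	if (wait_event_interruptible(my_init_wq,
				     atomic_read(&my_init_count) == 0))
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver my_driver = {
	.settle	= my_settle,
	.name	= "my_subchannel_type",
};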
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 2490b741e16a..6d229f3523a0 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -7,6 +7,10 @@
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */ 9 */
10
11#define KMSG_COMPONENT "cio"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
10#include <linux/module.h> 14#include <linux/module.h>
11#include <linux/init.h> 15#include <linux/init.h>
12#include <linux/spinlock.h> 16#include <linux/spinlock.h>
@@ -132,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
132static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, 136static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
133 int); 137 int);
134static void recovery_func(unsigned long data); 138static void recovery_func(unsigned long data);
135struct workqueue_struct *ccw_device_work;
136wait_queue_head_t ccw_device_init_wq; 139wait_queue_head_t ccw_device_init_wq;
137atomic_t ccw_device_init_count; 140atomic_t ccw_device_init_count;
138 141
@@ -155,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch)
155 return 0; 158 return 0;
156} 159}
157 160
158static void io_subchannel_settle(void) 161static int io_subchannel_settle(void)
159{ 162{
160 wait_event(ccw_device_init_wq, 163 int ret;
161 atomic_read(&ccw_device_init_count) == 0); 164
162 flush_workqueue(ccw_device_work); 165 ret = wait_event_interruptible(ccw_device_init_wq,
166 atomic_read(&ccw_device_init_count) == 0);
167 if (ret)
168 return -EINTR;
169 flush_workqueue(cio_work_q);
170 return 0;
163} 171}
164 172
165static struct css_driver io_subchannel_driver = { 173static struct css_driver io_subchannel_driver = {
@@ -184,27 +192,13 @@ int __init io_subchannel_init(void)
184 atomic_set(&ccw_device_init_count, 0); 192 atomic_set(&ccw_device_init_count, 0);
185 setup_timer(&recovery_timer, recovery_func, 0); 193 setup_timer(&recovery_timer, recovery_func, 0);
186 194
187 ccw_device_work = create_singlethread_workqueue("cio"); 195 ret = bus_register(&ccw_bus_type);
188 if (!ccw_device_work) 196 if (ret)
189 return -ENOMEM; 197 return ret;
190 slow_path_wq = create_singlethread_workqueue("kslowcrw");
191 if (!slow_path_wq) {
192 ret = -ENOMEM;
193 goto out_err;
194 }
195 if ((ret = bus_register (&ccw_bus_type)))
196 goto out_err;
197
198 ret = css_driver_register(&io_subchannel_driver); 198 ret = css_driver_register(&io_subchannel_driver);
199 if (ret) 199 if (ret)
200 goto out_err; 200 bus_unregister(&ccw_bus_type);
201 201
202 return 0;
203out_err:
204 if (ccw_device_work)
205 destroy_workqueue(ccw_device_work);
206 if (slow_path_wq)
207 destroy_workqueue(slow_path_wq);
208 return ret; 202 return ret;
209} 203}
210 204
@@ -299,53 +293,18 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
299 293
300static void ccw_device_unregister(struct ccw_device *cdev) 294static void ccw_device_unregister(struct ccw_device *cdev)
301{ 295{
302 if (test_and_clear_bit(1, &cdev->private->registered)) { 296 if (device_is_registered(&cdev->dev)) {
297 /* Undo device_add(). */
303 device_del(&cdev->dev); 298 device_del(&cdev->dev);
299 }
300 if (cdev->private->flags.initialized) {
301 cdev->private->flags.initialized = 0;
304 /* Release reference from device_initialize(). */ 302 /* Release reference from device_initialize(). */
305 put_device(&cdev->dev); 303 put_device(&cdev->dev);
306 } 304 }
307} 305}
308 306
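Note: the rewritten ccw_device_unregister() keeps two independent pieces of teardown apart, which the old single "registered" bit conflated. Restated as a comment (no new behavior):

/*
 * device_is_registered()      -> device_add() succeeded earlier, so
 *                                device_del() must undo it;
 * private->flags.initialized  -> device_initialize() was called, so
 *                                its initial reference must be dropped
 *                                exactly once via put_device().
 *
 * Clearing the flag before the put makes it safe to enter the
 * unregister path more than once.
 */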
309static void ccw_device_remove_orphan_cb(struct work_struct *work) 307static void io_subchannel_quiesce(struct subchannel *);
310{
311 struct ccw_device_private *priv;
312 struct ccw_device *cdev;
313
314 priv = container_of(work, struct ccw_device_private, kick_work);
315 cdev = priv->cdev;
316 ccw_device_unregister(cdev);
317 /* Release cdev reference for workqueue processing. */
318 put_device(&cdev->dev);
319}
320
321static void
322ccw_device_remove_disconnected(struct ccw_device *cdev)
323{
324 unsigned long flags;
325
326 /*
327 * Forced offline in disconnected state means
328 * 'throw away device'.
329 */
330 if (ccw_device_is_orphan(cdev)) {
331 /*
332 * Deregister ccw device.
333 * Unfortunately, we cannot do this directly from the
334 * attribute method.
335 */
336 /* Get cdev reference for workqueue processing. */
337 if (!get_device(&cdev->dev))
338 return;
339 spin_lock_irqsave(cdev->ccwlock, flags);
340 cdev->private->state = DEV_STATE_NOT_OPER;
341 spin_unlock_irqrestore(cdev->ccwlock, flags);
342 PREPARE_WORK(&cdev->private->kick_work,
343 ccw_device_remove_orphan_cb);
344 queue_work(slow_path_wq, &cdev->private->kick_work);
345 } else
346 /* Deregister subchannel, which will kill the ccw device. */
347 ccw_device_schedule_sch_unregister(cdev);
348}
349 308
350/** 309/**
351 * ccw_device_set_offline() - disable a ccw device for I/O 310 * ccw_device_set_offline() - disable a ccw device for I/O
@@ -360,7 +319,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
360 */ 319 */
361int ccw_device_set_offline(struct ccw_device *cdev) 320int ccw_device_set_offline(struct ccw_device *cdev)
362{ 321{
363 int ret; 322 struct subchannel *sch;
323 int ret, state;
364 324
365 if (!cdev) 325 if (!cdev)
366 return -ENODEV; 326 return -ENODEV;
@@ -374,6 +334,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
374 } 334 }
375 cdev->online = 0; 335 cdev->online = 0;
376 spin_lock_irq(cdev->ccwlock); 336 spin_lock_irq(cdev->ccwlock);
337 sch = to_subchannel(cdev->dev.parent);
377 /* Wait until a final state or DISCONNECTED is reached */ 338 /* Wait until a final state or DISCONNECTED is reached */
378 while (!dev_fsm_final_state(cdev) && 339 while (!dev_fsm_final_state(cdev) &&
379 cdev->private->state != DEV_STATE_DISCONNECTED) { 340 cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -382,20 +343,37 @@ int ccw_device_set_offline(struct ccw_device *cdev)
382 cdev->private->state == DEV_STATE_DISCONNECTED)); 343 cdev->private->state == DEV_STATE_DISCONNECTED));
383 spin_lock_irq(cdev->ccwlock); 344 spin_lock_irq(cdev->ccwlock);
384 } 345 }
385 ret = ccw_device_offline(cdev); 346 do {
386 if (ret) 347 ret = ccw_device_offline(cdev);
387 goto error; 348 if (!ret)
349 break;
350 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
351 "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
352 cdev->private->dev_id.devno);
353 if (ret != -EBUSY)
354 goto error;
355 state = cdev->private->state;
356 spin_unlock_irq(cdev->ccwlock);
357 io_subchannel_quiesce(sch);
358 spin_lock_irq(cdev->ccwlock);
359 cdev->private->state = state;
360 } while (ret == -EBUSY);
388 spin_unlock_irq(cdev->ccwlock); 361 spin_unlock_irq(cdev->ccwlock);
389 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || 362 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
390 cdev->private->state == DEV_STATE_DISCONNECTED)); 363 cdev->private->state == DEV_STATE_DISCONNECTED));
364 /* Inform the user if set offline failed. */
365 if (cdev->private->state == DEV_STATE_BOXED) {
366 pr_warning("%s: The device entered boxed state while "
367 "being set offline\n", dev_name(&cdev->dev));
368 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
369 pr_warning("%s: The device stopped operating while "
370 "being set offline\n", dev_name(&cdev->dev));
371 }
391 /* Give up reference from ccw_device_set_online(). */ 372 /* Give up reference from ccw_device_set_online(). */
392 put_device(&cdev->dev); 373 put_device(&cdev->dev);
393 return 0; 374 return 0;
394 375
395error: 376error:
396 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
397 ret, cdev->private->dev_id.ssid,
398 cdev->private->dev_id.devno);
399 cdev->private->state = DEV_STATE_OFFLINE; 377 cdev->private->state = DEV_STATE_OFFLINE;
400 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 378 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
401 spin_unlock_irq(cdev->ccwlock); 379 spin_unlock_irq(cdev->ccwlock);
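Note: the offline path now copes with internal I/O still running on the subchannel. Restating the retry loop above as comments (names as in the hunk):

/*
 * ccw_device_offline() == -EBUSY means the subchannel is still busy
 * with internal I/O.  In that case:
 *   - save the current FSM state,
 *   - drop the ccwlock (io_subchannel_quiesce() sleeps and takes the
 *     subchannel lock itself),
 *   - cancel/halt/clear the pending I/O via io_subchannel_quiesce(),
 *   - restore the saved state and retry ccw_device_offline().
 * Any error other than -EBUSY still goes to the error label; the
 * pr_warning() calls afterwards tell the user when the device ended
 * up boxed or not operational instead of cleanly offline.
 */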
@@ -448,6 +426,16 @@ int ccw_device_set_online(struct ccw_device *cdev)
448 if ((cdev->private->state != DEV_STATE_ONLINE) && 426 if ((cdev->private->state != DEV_STATE_ONLINE) &&
449 (cdev->private->state != DEV_STATE_W4SENSE)) { 427 (cdev->private->state != DEV_STATE_W4SENSE)) {
450 spin_unlock_irq(cdev->ccwlock); 428 spin_unlock_irq(cdev->ccwlock);
429 /* Inform the user that set online failed. */
430 if (cdev->private->state == DEV_STATE_BOXED) {
431 pr_warning("%s: Setting the device online failed "
432 "because it is boxed\n",
433 dev_name(&cdev->dev));
434 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
435 pr_warning("%s: Setting the device online failed "
436 "because it is not operational\n",
437 dev_name(&cdev->dev));
438 }
451 /* Give up online reference since onlining failed. */ 439 /* Give up online reference since onlining failed. */
452 put_device(&cdev->dev); 440 put_device(&cdev->dev);
453 return -ENODEV; 441 return -ENODEV;
@@ -494,27 +482,22 @@ error:
494 482
495static int online_store_handle_offline(struct ccw_device *cdev) 483static int online_store_handle_offline(struct ccw_device *cdev)
496{ 484{
497 if (cdev->private->state == DEV_STATE_DISCONNECTED) 485 if (cdev->private->state == DEV_STATE_DISCONNECTED) {
498 ccw_device_remove_disconnected(cdev); 486 spin_lock_irq(cdev->ccwlock);
499 else if (cdev->online && cdev->drv && cdev->drv->set_offline) 487 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
488 spin_unlock_irq(cdev->ccwlock);
489 } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
500 return ccw_device_set_offline(cdev); 490 return ccw_device_set_offline(cdev);
501 return 0; 491 return 0;
502} 492}
503 493
504static int online_store_recog_and_online(struct ccw_device *cdev) 494static int online_store_recog_and_online(struct ccw_device *cdev)
505{ 495{
506 int ret;
507
508 /* Do device recognition, if needed. */ 496 /* Do device recognition, if needed. */
509 if (cdev->private->state == DEV_STATE_BOXED) { 497 if (cdev->private->state == DEV_STATE_BOXED) {
510 ret = ccw_device_recognition(cdev); 498 spin_lock_irq(cdev->ccwlock);
511 if (ret) { 499 ccw_device_recognition(cdev);
512 CIO_MSG_EVENT(0, "Couldn't start recognition " 500 spin_unlock_irq(cdev->ccwlock);
513 "for device 0.%x.%04x (ret=%d)\n",
514 cdev->private->dev_id.ssid,
515 cdev->private->dev_id.devno, ret);
516 return ret;
517 }
518 wait_event(cdev->private->wait_q, 501 wait_event(cdev->private->wait_q,
519 cdev->private->flags.recog_done); 502 cdev->private->flags.recog_done);
520 if (cdev->private->state != DEV_STATE_OFFLINE) 503 if (cdev->private->state != DEV_STATE_OFFLINE)
@@ -553,11 +536,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
553 int force, ret; 536 int force, ret;
554 unsigned long i; 537 unsigned long i;
555 538
556 if ((cdev->private->state != DEV_STATE_OFFLINE && 539 if (!dev_fsm_final_state(cdev) &&
557 cdev->private->state != DEV_STATE_ONLINE && 540 cdev->private->state != DEV_STATE_DISCONNECTED)
558 cdev->private->state != DEV_STATE_BOXED && 541 return -EAGAIN;
559 cdev->private->state != DEV_STATE_DISCONNECTED) || 542 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
560 atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
561 return -EAGAIN; 543 return -EAGAIN;
562 544
563 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 545 if (cdev->drv && !try_module_get(cdev->drv->owner)) {
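Note: the long whitelist of states collapses into dev_fsm_final_state(), and the onoff atomic keeps concurrent writers out. A sketch of the full gate, including the release that happens later in online_store() — that part is outside this hunk, so the atomic_set() placement is an assumption:

if (!dev_fsm_final_state(cdev) &&
    cdev->private->state != DEV_STATE_DISCONNECTED)
	return -EAGAIN;		/* transition currently impossible */
if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
	return -EAGAIN;		/* another transition is running */
/* ... set the device online or offline ... */
atomic_set(&cdev->private->onoff, 0);	/* reopen the gate */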
@@ -665,81 +647,31 @@ static int ccw_device_register(struct ccw_device *cdev)
665 cdev->private->dev_id.devno); 647 cdev->private->dev_id.devno);
666 if (ret) 648 if (ret)
667 return ret; 649 return ret;
668 ret = device_add(dev); 650 return device_add(dev);
669 if (ret)
670 return ret;
671
672 set_bit(1, &cdev->private->registered);
673 return ret;
674}
675
676struct match_data {
677 struct ccw_dev_id dev_id;
678 struct ccw_device * sibling;
679};
680
681static int
682match_devno(struct device * dev, void * data)
683{
684 struct match_data * d = data;
685 struct ccw_device * cdev;
686
687 cdev = to_ccwdev(dev);
688 if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
689 !ccw_device_is_orphan(cdev) &&
690 ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
691 (cdev != d->sibling))
692 return 1;
693 return 0;
694} 651}
695 652
696static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id, 653static int match_dev_id(struct device *dev, void *data)
697 struct ccw_device *sibling)
698{ 654{
699 struct device *dev; 655 struct ccw_device *cdev = to_ccwdev(dev);
700 struct match_data data; 656 struct ccw_dev_id *dev_id = data;
701
702 data.dev_id = *dev_id;
703 data.sibling = sibling;
704 dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
705
706 return dev ? to_ccwdev(dev) : NULL;
707}
708
709static int match_orphan(struct device *dev, void *data)
710{
711 struct ccw_dev_id *dev_id;
712 struct ccw_device *cdev;
713 657
714 dev_id = data;
715 cdev = to_ccwdev(dev);
716 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); 658 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
717} 659}
718 660
719static struct ccw_device * 661static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
720get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
721 struct ccw_dev_id *dev_id)
722{ 662{
723 struct device *dev; 663 struct device *dev;
724 664
725 dev = device_find_child(&css->pseudo_subchannel->dev, dev_id, 665 dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
726 match_orphan);
727 666
728 return dev ? to_ccwdev(dev) : NULL; 667 return dev ? to_ccwdev(dev) : NULL;
729} 668}
730 669
731void ccw_device_do_unbind_bind(struct work_struct *work) 670static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
732{ 671{
733 struct ccw_device_private *priv;
734 struct ccw_device *cdev;
735 struct subchannel *sch;
736 int ret; 672 int ret;
737 673
738 priv = container_of(work, struct ccw_device_private, kick_work); 674 if (device_is_registered(&cdev->dev)) {
739 cdev = priv->cdev;
740 sch = to_subchannel(cdev->dev.parent);
741
742 if (test_bit(1, &cdev->private->registered)) {
743 device_release_driver(&cdev->dev); 675 device_release_driver(&cdev->dev);
744 ret = device_attach(&cdev->dev); 676 ret = device_attach(&cdev->dev);
745 WARN_ON(ret == -ENODEV); 677 WARN_ON(ret == -ENODEV);
@@ -773,6 +705,8 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
773 return ERR_PTR(-ENOMEM); 705 return ERR_PTR(-ENOMEM);
774} 706}
775 707
708static void ccw_device_todo(struct work_struct *work);
709
776static int io_subchannel_initialize_dev(struct subchannel *sch, 710static int io_subchannel_initialize_dev(struct subchannel *sch,
777 struct ccw_device *cdev) 711 struct ccw_device *cdev)
778{ 712{
@@ -780,7 +714,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
780 atomic_set(&cdev->private->onoff, 0); 714 atomic_set(&cdev->private->onoff, 0);
781 cdev->dev.parent = &sch->dev; 715 cdev->dev.parent = &sch->dev;
782 cdev->dev.release = ccw_device_release; 716 cdev->dev.release = ccw_device_release;
783 INIT_WORK(&cdev->private->kick_work, NULL); 717 INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
784 cdev->dev.groups = ccwdev_attr_groups; 718 cdev->dev.groups = ccwdev_attr_groups;
785 /* Do first half of device_register. */ 719 /* Do first half of device_register. */
786 device_initialize(&cdev->dev); 720 device_initialize(&cdev->dev);
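Note: every device is now wired to a single work function, ccw_device_todo(), instead of re-targeting kick_work with PREPARE_WORK() for each event. A plausible sketch of the scheduling side; ccw_device_sched_todo() is added elsewhere in this patch, so the priority comparison below is an assumption inferred from how CDEV_TODO_* requests are used in this file:

void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	if (cdev->private->todo >= todo)
		return;		/* an equal or stronger request is queued */
	cdev->private->todo = todo;
	/* Hold the cdev for the workqueue processing. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work))
		put_device(&cdev->dev);	/* work was already queued */
}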
@@ -789,6 +723,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
789 put_device(&cdev->dev); 723 put_device(&cdev->dev);
790 return -ENODEV; 724 return -ENODEV;
791 } 725 }
726 cdev->private->flags.initialized = 1;
792 return 0; 727 return 0;
793} 728}
794 729
@@ -806,76 +741,7 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
806 return cdev; 741 return cdev;
807} 742}
808 743
809static int io_subchannel_recog(struct ccw_device *, struct subchannel *); 744static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
810
811static void sch_attach_device(struct subchannel *sch,
812 struct ccw_device *cdev)
813{
814 css_update_ssd_info(sch);
815 spin_lock_irq(sch->lock);
816 sch_set_cdev(sch, cdev);
817 cdev->private->schid = sch->schid;
818 cdev->ccwlock = sch->lock;
819 ccw_device_trigger_reprobe(cdev);
820 spin_unlock_irq(sch->lock);
821}
822
823static void sch_attach_disconnected_device(struct subchannel *sch,
824 struct ccw_device *cdev)
825{
826 struct subchannel *other_sch;
827 int ret;
828
829 /* Get reference for new parent. */
830 if (!get_device(&sch->dev))
831 return;
832 other_sch = to_subchannel(cdev->dev.parent);
833 /* Note: device_move() changes cdev->dev.parent */
834 ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
835 if (ret) {
836 CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
837 "(ret=%d)!\n", cdev->private->dev_id.ssid,
838 cdev->private->dev_id.devno, ret);
839 /* Put reference for new parent. */
840 put_device(&sch->dev);
841 return;
842 }
843 sch_set_cdev(other_sch, NULL);
844 /* No need to keep a subchannel without ccw device around. */
845 css_sch_device_unregister(other_sch);
846 sch_attach_device(sch, cdev);
847 /* Put reference for old parent. */
848 put_device(&other_sch->dev);
849}
850
851static void sch_attach_orphaned_device(struct subchannel *sch,
852 struct ccw_device *cdev)
853{
854 int ret;
855 struct subchannel *pseudo_sch;
856
857 /* Get reference for new parent. */
858 if (!get_device(&sch->dev))
859 return;
860 pseudo_sch = to_subchannel(cdev->dev.parent);
861 /*
862 * Try to move the ccw device to its new subchannel.
863 * Note: device_move() changes cdev->dev.parent
864 */
865 ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
866 if (ret) {
867 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
868 "failed (ret=%d)!\n",
869 cdev->private->dev_id.ssid,
870 cdev->private->dev_id.devno, ret);
871 /* Put reference for new parent. */
872 put_device(&sch->dev);
873 return;
874 }
875 sch_attach_device(sch, cdev);
876 /* Put reference on pseudo subchannel. */
877 put_device(&pseudo_sch->dev);
878}
879 745
880static void sch_create_and_recog_new_device(struct subchannel *sch) 746static void sch_create_and_recog_new_device(struct subchannel *sch)
881{ 747{
@@ -888,100 +754,19 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
888 css_sch_device_unregister(sch); 754 css_sch_device_unregister(sch);
889 return; 755 return;
890 } 756 }
891 spin_lock_irq(sch->lock);
892 sch_set_cdev(sch, cdev);
893 spin_unlock_irq(sch->lock);
894 /* Start recognition for the new ccw device. */ 757 /* Start recognition for the new ccw device. */
895 if (io_subchannel_recog(cdev, sch)) { 758 io_subchannel_recog(cdev, sch);
896 spin_lock_irq(sch->lock);
897 sch_set_cdev(sch, NULL);
898 spin_unlock_irq(sch->lock);
899 css_sch_device_unregister(sch);
900 /* Put reference from io_subchannel_create_ccwdev(). */
901 put_device(&sch->dev);
902 /* Give up initial reference. */
903 put_device(&cdev->dev);
904 }
905}
906
907
908void ccw_device_move_to_orphanage(struct work_struct *work)
909{
910 struct ccw_device_private *priv;
911 struct ccw_device *cdev;
912 struct ccw_device *replacing_cdev;
913 struct subchannel *sch;
914 int ret;
915 struct channel_subsystem *css;
916 struct ccw_dev_id dev_id;
917
918 priv = container_of(work, struct ccw_device_private, kick_work);
919 cdev = priv->cdev;
920 sch = to_subchannel(cdev->dev.parent);
921 css = to_css(sch->dev.parent);
922 dev_id.devno = sch->schib.pmcw.dev;
923 dev_id.ssid = sch->schid.ssid;
924
925 /* Increase refcount for pseudo subchannel. */
926 get_device(&css->pseudo_subchannel->dev);
927 /*
928 * Move the orphaned ccw device to the orphanage so the replacing
929 * ccw device can take its place on the subchannel.
930 * Note: device_move() changes cdev->dev.parent
931 */
932 ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
933 DPM_ORDER_NONE);
934 if (ret) {
935 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
936 "(ret=%d)!\n", cdev->private->dev_id.ssid,
937 cdev->private->dev_id.devno, ret);
938 /* Decrease refcount for pseudo subchannel again. */
939 put_device(&css->pseudo_subchannel->dev);
940 return;
941 }
942 cdev->ccwlock = css->pseudo_subchannel->lock;
943 /*
944 * Search for the replacing ccw device
945 * - among the disconnected devices
946 * - in the orphanage
947 */
948 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
949 if (replacing_cdev) {
950 sch_attach_disconnected_device(sch, replacing_cdev);
951 /* Release reference from get_disc_ccwdev_by_dev_id() */
952 put_device(&replacing_cdev->dev);
953 /* Release reference of subchannel from old cdev. */
954 put_device(&sch->dev);
955 return;
956 }
957 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
958 if (replacing_cdev) {
959 sch_attach_orphaned_device(sch, replacing_cdev);
960 /* Release reference from get_orphaned_ccwdev_by_dev_id() */
961 put_device(&replacing_cdev->dev);
962 /* Release reference of subchannel from old cdev. */
963 put_device(&sch->dev);
964 return;
965 }
966 sch_create_and_recog_new_device(sch);
967 /* Release reference of subchannel from old cdev. */
968 put_device(&sch->dev);
969} 759}
970 760
971/* 761/*
972 * Register recognized device. 762 * Register recognized device.
973 */ 763 */
974static void 764static void io_subchannel_register(struct ccw_device *cdev)
975io_subchannel_register(struct work_struct *work)
976{ 765{
977 struct ccw_device_private *priv;
978 struct ccw_device *cdev;
979 struct subchannel *sch; 766 struct subchannel *sch;
980 int ret; 767 int ret, adjust_init_count = 1;
981 unsigned long flags; 768 unsigned long flags;
982 769
983 priv = container_of(work, struct ccw_device_private, kick_work);
984 cdev = priv->cdev;
985 sch = to_subchannel(cdev->dev.parent); 770 sch = to_subchannel(cdev->dev.parent);
986 /* 771 /*
987 * Check if subchannel is still registered. It may have become 772 * Check if subchannel is still registered. It may have become
@@ -1008,6 +793,7 @@ io_subchannel_register(struct work_struct *work)
1008 cdev->private->dev_id.ssid, 793 cdev->private->dev_id.ssid,
1009 cdev->private->dev_id.devno); 794 cdev->private->dev_id.devno);
1010 } 795 }
796 adjust_init_count = 0;
1011 goto out; 797 goto out;
1012 } 798 }
1013 /* 799 /*
@@ -1033,41 +819,23 @@ out:
1033 cdev->private->flags.recog_done = 1; 819 cdev->private->flags.recog_done = 1;
1034 wake_up(&cdev->private->wait_q); 820 wake_up(&cdev->private->wait_q);
1035out_err: 821out_err:
1036 /* Release reference for workqueue processing. */ 822 if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
1037 put_device(&cdev->dev);
1038 if (atomic_dec_and_test(&ccw_device_init_count))
1039 wake_up(&ccw_device_init_wq); 823 wake_up(&ccw_device_init_wq);
1040} 824}
1041 825
1042static void ccw_device_call_sch_unregister(struct work_struct *work) 826static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
1043{ 827{
1044 struct ccw_device_private *priv;
1045 struct ccw_device *cdev;
1046 struct subchannel *sch; 828 struct subchannel *sch;
1047 829
1048 priv = container_of(work, struct ccw_device_private, kick_work);
1049 cdev = priv->cdev;
1050 /* Get subchannel reference for local processing. */ 830 /* Get subchannel reference for local processing. */
1051 if (!get_device(cdev->dev.parent)) 831 if (!get_device(cdev->dev.parent))
1052 return; 832 return;
1053 sch = to_subchannel(cdev->dev.parent); 833 sch = to_subchannel(cdev->dev.parent);
1054 css_sch_device_unregister(sch); 834 css_sch_device_unregister(sch);
1055 /* Release cdev reference for workqueue processing.*/
1056 put_device(&cdev->dev);
1057 /* Release subchannel reference for local processing. */ 835 /* Release subchannel reference for local processing. */
1058 put_device(&sch->dev); 836 put_device(&sch->dev);
1059} 837}
1060 838
1061void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
1062{
1063 /* Get cdev reference for workqueue processing. */
1064 if (!get_device(&cdev->dev))
1065 return;
1066 PREPARE_WORK(&cdev->private->kick_work,
1067 ccw_device_call_sch_unregister);
1068 queue_work(slow_path_wq, &cdev->private->kick_work);
1069}
1070
1071/* 839/*
1072 * subchannel recognition done. Called from the state machine. 840 * subchannel recognition done. Called from the state machine.
1073 */ 841 */
@@ -1083,7 +851,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1083 /* Device did not respond in time. */ 851 /* Device did not respond in time. */
1084 case DEV_STATE_NOT_OPER: 852 case DEV_STATE_NOT_OPER:
1085 cdev->private->flags.recog_done = 1; 853 cdev->private->flags.recog_done = 1;
1086 ccw_device_schedule_sch_unregister(cdev); 854 /* Remove device found not operational. */
855 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1087 if (atomic_dec_and_test(&ccw_device_init_count)) 856 if (atomic_dec_and_test(&ccw_device_init_count))
1088 wake_up(&ccw_device_init_wq); 857 wake_up(&ccw_device_init_wq);
1089 break; 858 break;
@@ -1092,22 +861,15 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1092 * We can't register the device in interrupt context so 861 * We can't register the device in interrupt context so
1093 * we schedule a work item. 862 * we schedule a work item.
1094 */ 863 */
1095 if (!get_device(&cdev->dev)) 864 ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
1096 break;
1097 PREPARE_WORK(&cdev->private->kick_work,
1098 io_subchannel_register);
1099 queue_work(slow_path_wq, &cdev->private->kick_work);
1100 break; 865 break;
1101 } 866 }
1102} 867}
1103 868
1104static int 869static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1105io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1106{ 870{
1107 int rc;
1108 struct ccw_device_private *priv; 871 struct ccw_device_private *priv;
1109 872
1110 sch_set_cdev(sch, cdev);
1111 cdev->ccwlock = sch->lock; 873 cdev->ccwlock = sch->lock;
1112 874
1113 /* Init private data. */ 875 /* Init private data. */
@@ -1125,62 +887,81 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1125 887
1126 /* Start async. device sensing. */ 888 /* Start async. device sensing. */
1127 spin_lock_irq(sch->lock); 889 spin_lock_irq(sch->lock);
1128 rc = ccw_device_recognition(cdev); 890 sch_set_cdev(sch, cdev);
891 ccw_device_recognition(cdev);
1129 spin_unlock_irq(sch->lock); 892 spin_unlock_irq(sch->lock);
1130 if (rc) {
1131 if (atomic_dec_and_test(&ccw_device_init_count))
1132 wake_up(&ccw_device_init_wq);
1133 }
1134 return rc;
1135} 893}
1136 894
1137static void ccw_device_move_to_sch(struct work_struct *work) 895static int ccw_device_move_to_sch(struct ccw_device *cdev,
896 struct subchannel *sch)
1138{ 897{
1139 struct ccw_device_private *priv; 898 struct subchannel *old_sch;
1140 int rc; 899 int rc, old_enabled = 0;
1141 struct subchannel *sch;
1142 struct ccw_device *cdev;
1143 struct subchannel *former_parent;
1144 900
1145 priv = container_of(work, struct ccw_device_private, kick_work); 901 old_sch = to_subchannel(cdev->dev.parent);
1146 sch = priv->sch; 902 /* Obtain child reference for new parent. */
1147 cdev = priv->cdev;
1148 former_parent = to_subchannel(cdev->dev.parent);
1149 /* Get reference for new parent. */
1150 if (!get_device(&sch->dev)) 903 if (!get_device(&sch->dev))
1151 return; 904 return -ENODEV;
905
906 if (!sch_is_pseudo_sch(old_sch)) {
907 spin_lock_irq(old_sch->lock);
908 old_enabled = old_sch->schib.pmcw.ena;
909 rc = 0;
910 if (old_enabled)
911 rc = cio_disable_subchannel(old_sch);
912 spin_unlock_irq(old_sch->lock);
913 if (rc == -EBUSY) {
914 /* Release child reference for new parent. */
915 put_device(&sch->dev);
916 return rc;
917 }
918 }
919
1152 mutex_lock(&sch->reg_mutex); 920 mutex_lock(&sch->reg_mutex);
1153 /*
1154 * Try to move the ccw device to its new subchannel.
1155 * Note: device_move() changes cdev->dev.parent
1156 */
1157 rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); 921 rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
1158 mutex_unlock(&sch->reg_mutex); 922 mutex_unlock(&sch->reg_mutex);
1159 if (rc) { 923 if (rc) {
1160 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel " 924 CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
1161 "0.%x.%04x failed (ret=%d)!\n",
1162 cdev->private->dev_id.ssid, 925 cdev->private->dev_id.ssid,
1163 cdev->private->dev_id.devno, sch->schid.ssid, 926 cdev->private->dev_id.devno, sch->schid.ssid,
1164 sch->schid.sch_no, rc); 927 sch->schib.pmcw.dev, rc);
1165 css_sch_device_unregister(sch); 928 if (old_enabled) {
1166 /* Put reference for new parent again. */ 929 /* Try to reenable the old subchannel. */
930 spin_lock_irq(old_sch->lock);
931 cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
932 spin_unlock_irq(old_sch->lock);
933 }
934 /* Release child reference for new parent. */
1167 put_device(&sch->dev); 935 put_device(&sch->dev);
1168 goto out; 936 return rc;
1169 } 937 }
1170 if (!sch_is_pseudo_sch(former_parent)) { 938 /* Clean up old subchannel. */
1171 spin_lock_irq(former_parent->lock); 939 if (!sch_is_pseudo_sch(old_sch)) {
1172 sch_set_cdev(former_parent, NULL); 940 spin_lock_irq(old_sch->lock);
1173 spin_unlock_irq(former_parent->lock); 941 sch_set_cdev(old_sch, NULL);
1174 css_sch_device_unregister(former_parent); 942 spin_unlock_irq(old_sch->lock);
1175 /* Reset intparm to zeroes. */ 943 css_schedule_eval(old_sch->schid);
1176 former_parent->config.intparm = 0;
1177 cio_commit_config(former_parent);
1178 } 944 }
1179 sch_attach_device(sch, cdev); 945 /* Release child reference for old parent. */
1180out: 946 put_device(&old_sch->dev);
1181 /* Put reference for old parent. */ 947 /* Initialize new subchannel. */
1182 put_device(&former_parent->dev); 948 spin_lock_irq(sch->lock);
1183 put_device(&cdev->dev); 949 cdev->private->schid = sch->schid;
950 cdev->ccwlock = sch->lock;
951 if (!sch_is_pseudo_sch(sch))
952 sch_set_cdev(sch, cdev);
953 spin_unlock_irq(sch->lock);
954 if (!sch_is_pseudo_sch(sch))
955 css_update_ssd_info(sch);
956 return 0;
957}
958
959static int ccw_device_move_to_orph(struct ccw_device *cdev)
960{
961 struct subchannel *sch = to_subchannel(cdev->dev.parent);
962 struct channel_subsystem *css = to_css(sch->dev.parent);
963
964 return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
1184} 965}
1185 966
1186static void io_subchannel_irq(struct subchannel *sch) 967static void io_subchannel_irq(struct subchannel *sch)
@@ -1199,9 +980,6 @@ void io_subchannel_init_config(struct subchannel *sch)
1199{ 980{
1200 memset(&sch->config, 0, sizeof(sch->config)); 981 memset(&sch->config, 0, sizeof(sch->config));
1201 sch->config.csense = 1; 982 sch->config.csense = 1;
1202 /* Use subchannel mp mode when there is more than 1 installed CHPID. */
1203 if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
1204 sch->config.mp = 1;
1205} 983}
1206 984
1207static void io_subchannel_init_fields(struct subchannel *sch) 985static void io_subchannel_init_fields(struct subchannel *sch)
@@ -1222,23 +1000,6 @@ static void io_subchannel_init_fields(struct subchannel *sch)
1222 io_subchannel_init_config(sch); 1000 io_subchannel_init_config(sch);
1223} 1001}
1224 1002
1225static void io_subchannel_do_unreg(struct work_struct *work)
1226{
1227 struct subchannel *sch;
1228
1229 sch = container_of(work, struct subchannel, work);
1230 css_sch_device_unregister(sch);
1231 put_device(&sch->dev);
1232}
1233
1234/* Schedule unregister if we have no cdev. */
1235static void io_subchannel_schedule_removal(struct subchannel *sch)
1236{
1237 get_device(&sch->dev);
1238 INIT_WORK(&sch->work, io_subchannel_do_unreg);
1239 queue_work(slow_path_wq, &sch->work);
1240}
1241
1242/* 1003/*
1243 * Note: We always return 0 so that we bind to the device even on error. 1004 * Note: We always return 0 so that we bind to the device even on error.
1244 * This is needed so that our remove function is called on unregister. 1005 * This is needed so that our remove function is called on unregister.
@@ -1247,8 +1008,6 @@ static int io_subchannel_probe(struct subchannel *sch)
1247{ 1008{
1248 struct ccw_device *cdev; 1009 struct ccw_device *cdev;
1249 int rc; 1010 int rc;
1250 unsigned long flags;
1251 struct ccw_dev_id dev_id;
1252 1011
1253 if (cio_is_console(sch->schid)) { 1012 if (cio_is_console(sch->schid)) {
1254 rc = sysfs_create_group(&sch->dev.kobj, 1013 rc = sysfs_create_group(&sch->dev.kobj,
@@ -1268,6 +1027,7 @@ static int io_subchannel_probe(struct subchannel *sch)
1268 cdev = sch_get_cdev(sch); 1027 cdev = sch_get_cdev(sch);
1269 cdev->dev.groups = ccwdev_attr_groups; 1028 cdev->dev.groups = ccwdev_attr_groups;
1270 device_initialize(&cdev->dev); 1029 device_initialize(&cdev->dev);
1030 cdev->private->flags.initialized = 1;
1271 ccw_device_register(cdev); 1031 ccw_device_register(cdev);
1272 /* 1032 /*
1273 * Check if the device is already online. If it is 1033 * Check if the device is already online. If it is
@@ -1292,44 +1052,14 @@ static int io_subchannel_probe(struct subchannel *sch)
1292 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1052 sch->private = kzalloc(sizeof(struct io_subchannel_private),
1293 GFP_KERNEL | GFP_DMA); 1053 GFP_KERNEL | GFP_DMA);
1294 if (!sch->private) 1054 if (!sch->private)
1295 goto out_err; 1055 goto out_schedule;
1296 /* 1056 css_schedule_eval(sch->schid);
1297 * First check if a fitting device may be found amongst the
1298 * disconnected devices or in the orphanage.
1299 */
1300 dev_id.devno = sch->schib.pmcw.dev;
1301 dev_id.ssid = sch->schid.ssid;
1302 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1303 if (!cdev)
1304 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
1305 &dev_id);
1306 if (cdev) {
1307 /*
1308 * Schedule moving the device until when we have a registered
1309 * subchannel to move to and succeed the probe. We can
1310 * unregister later again, when the probe is through.
1311 */
1312 cdev->private->sch = sch;
1313 PREPARE_WORK(&cdev->private->kick_work,
1314 ccw_device_move_to_sch);
1315 queue_work(slow_path_wq, &cdev->private->kick_work);
1316 return 0;
1317 }
1318 cdev = io_subchannel_create_ccwdev(sch);
1319 if (IS_ERR(cdev))
1320 goto out_err;
1321 rc = io_subchannel_recog(cdev, sch);
1322 if (rc) {
1323 spin_lock_irqsave(sch->lock, flags);
1324 io_subchannel_recog_done(cdev);
1325 spin_unlock_irqrestore(sch->lock, flags);
1326 }
1327 return 0; 1057 return 0;
1328out_err: 1058
1329 kfree(sch->private);
1330 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1331out_schedule: 1059out_schedule:
1332 io_subchannel_schedule_removal(sch); 1060 spin_lock_irq(sch->lock);
1061 css_sched_sch_todo(sch, SCH_TODO_UNREG);
1062 spin_unlock_irq(sch->lock);
1333 return 0; 1063 return 0;
1334} 1064}
1335 1065
@@ -1337,32 +1067,23 @@ static int
1337io_subchannel_remove (struct subchannel *sch) 1067io_subchannel_remove (struct subchannel *sch)
1338{ 1068{
1339 struct ccw_device *cdev; 1069 struct ccw_device *cdev;
1340 unsigned long flags;
1341 1070
1342 cdev = sch_get_cdev(sch); 1071 cdev = sch_get_cdev(sch);
1343 if (!cdev) 1072 if (!cdev)
1344 return 0; 1073 goto out_free;
1074 io_subchannel_quiesce(sch);
1345 /* Set ccw device to not operational and drop reference. */ 1075 /* Set ccw device to not operational and drop reference. */
1346 spin_lock_irqsave(cdev->ccwlock, flags); 1076 spin_lock_irq(cdev->ccwlock);
1347 sch_set_cdev(sch, NULL); 1077 sch_set_cdev(sch, NULL);
1348 cdev->private->state = DEV_STATE_NOT_OPER; 1078 cdev->private->state = DEV_STATE_NOT_OPER;
1349 spin_unlock_irqrestore(cdev->ccwlock, flags); 1079 spin_unlock_irq(cdev->ccwlock);
1350 ccw_device_unregister(cdev); 1080 ccw_device_unregister(cdev);
1081out_free:
1351 kfree(sch->private); 1082 kfree(sch->private);
1352 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); 1083 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1353 return 0; 1084 return 0;
1354} 1085}
1355 1086
1356static int io_subchannel_notify(struct subchannel *sch, int event)
1357{
1358 struct ccw_device *cdev;
1359
1360 cdev = sch_get_cdev(sch);
1361 if (!cdev)
1362 return 0;
1363 return ccw_device_notify(cdev, event);
1364}
1365
1366static void io_subchannel_verify(struct subchannel *sch) 1087static void io_subchannel_verify(struct subchannel *sch)
1367{ 1088{
1368 struct ccw_device *cdev; 1089 struct ccw_device *cdev;
@@ -1372,36 +1093,6 @@ static void io_subchannel_verify(struct subchannel *sch)
1372 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1093 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1373} 1094}
1374 1095
1375static int check_for_io_on_path(struct subchannel *sch, int mask)
1376{
1377 if (cio_update_schib(sch))
1378 return 0;
1379 if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
1380 return 1;
1381 return 0;
1382}
1383
1384static void terminate_internal_io(struct subchannel *sch,
1385 struct ccw_device *cdev)
1386{
1387 if (cio_clear(sch)) {
1388 /* Recheck device in case clear failed. */
1389 sch->lpm = 0;
1390 if (cdev->online)
1391 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1392 else
1393 css_schedule_eval(sch->schid);
1394 return;
1395 }
1396 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
1397 /* Request retry of internal operation. */
1398 cdev->private->flags.intretry = 1;
1399 /* Call handler. */
1400 if (cdev->handler)
1401 cdev->handler(cdev, cdev->private->intparm,
1402 ERR_PTR(-EIO));
1403}
1404
1405static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) 1096static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1406{ 1097{
1407 struct ccw_device *cdev; 1098 struct ccw_device *cdev;
@@ -1409,18 +1100,24 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1409 cdev = sch_get_cdev(sch); 1100 cdev = sch_get_cdev(sch);
1410 if (!cdev) 1101 if (!cdev)
1411 return; 1102 return;
1412 if (check_for_io_on_path(sch, mask)) { 1103 if (cio_update_schib(sch))
1413 if (cdev->private->state == DEV_STATE_ONLINE) 1104 goto err;
1414 ccw_device_kill_io(cdev); 1105 /* Check for I/O on path. */
1415 else { 1106 if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
1416 terminate_internal_io(sch, cdev); 1107 goto out;
1417 /* Re-start path verification. */ 1108 if (cdev->private->state == DEV_STATE_ONLINE) {
1418 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1109 ccw_device_kill_io(cdev);
1419 } 1110 goto out;
1420 } else 1111 }
1421 /* trigger path verification. */ 1112 if (cio_clear(sch))
1422 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1113 goto err;
1114out:
1115 /* Trigger path verification. */
1116 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1117 return;
1423 1118
1119err:
1120 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1424} 1121}
1425 1122
1426static int io_subchannel_chp_event(struct subchannel *sch, 1123static int io_subchannel_chp_event(struct subchannel *sch,
@@ -1457,46 +1154,41 @@ static int io_subchannel_chp_event(struct subchannel *sch,
1457 return 0; 1154 return 0;
1458} 1155}
1459 1156
1460static void 1157static void io_subchannel_quiesce(struct subchannel *sch)
1461io_subchannel_shutdown(struct subchannel *sch)
1462{ 1158{
1463 struct ccw_device *cdev; 1159 struct ccw_device *cdev;
1464 int ret; 1160 int ret;
1465 1161
1162 spin_lock_irq(sch->lock);
1466 cdev = sch_get_cdev(sch); 1163 cdev = sch_get_cdev(sch);
1467
1468 if (cio_is_console(sch->schid)) 1164 if (cio_is_console(sch->schid))
1469 return; 1165 goto out_unlock;
1470 if (!sch->schib.pmcw.ena) 1166 if (!sch->schib.pmcw.ena)
1471 /* Nothing to do. */ 1167 goto out_unlock;
1472 return;
1473 ret = cio_disable_subchannel(sch); 1168 ret = cio_disable_subchannel(sch);
1474 if (ret != -EBUSY) 1169 if (ret != -EBUSY)
1475 /* Subchannel is disabled, we're done. */ 1170 goto out_unlock;
1476 return;
1477 cdev->private->state = DEV_STATE_QUIESCE;
1478 if (cdev->handler) 1171 if (cdev->handler)
1479 cdev->handler(cdev, cdev->private->intparm, 1172 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1480 ERR_PTR(-EIO)); 1173 while (ret == -EBUSY) {
1481 ret = ccw_device_cancel_halt_clear(cdev); 1174 cdev->private->state = DEV_STATE_QUIESCE;
1482 if (ret == -EBUSY) { 1175 ret = ccw_device_cancel_halt_clear(cdev);
1483 ccw_device_set_timeout(cdev, HZ/10); 1176 if (ret == -EBUSY) {
1484 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 1177 ccw_device_set_timeout(cdev, HZ/10);
1178 spin_unlock_irq(sch->lock);
1179 wait_event(cdev->private->wait_q,
1180 cdev->private->state != DEV_STATE_QUIESCE);
1181 spin_lock_irq(sch->lock);
1182 }
1183 ret = cio_disable_subchannel(sch);
1485 } 1184 }
1486 cio_disable_subchannel(sch); 1185out_unlock:
1186 spin_unlock_irq(sch->lock);
1487} 1187}
1488 1188
1489static int io_subchannel_get_status(struct subchannel *sch) 1189static void io_subchannel_shutdown(struct subchannel *sch)
1490{ 1190{
1491 struct schib schib; 1191 io_subchannel_quiesce(sch);
1492
1493 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
1494 return CIO_GONE;
1495 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
1496 return CIO_REVALIDATE;
1497 if (!sch->lpm)
1498 return CIO_NO_PATH;
1499 return CIO_OPER;
1500} 1192}
1501 1193
1502static int device_is_disconnected(struct ccw_device *cdev) 1194static int device_is_disconnected(struct ccw_device *cdev)
@@ -1575,20 +1267,16 @@ static void ccw_device_schedule_recovery(void)
1575static int purge_fn(struct device *dev, void *data) 1267static int purge_fn(struct device *dev, void *data)
1576{ 1268{
1577 struct ccw_device *cdev = to_ccwdev(dev); 1269 struct ccw_device *cdev = to_ccwdev(dev);
1578 struct ccw_device_private *priv = cdev->private; 1270 struct ccw_dev_id *id = &cdev->private->dev_id;
1579 int unreg;
1580 1271
1581 spin_lock_irq(cdev->ccwlock); 1272 spin_lock_irq(cdev->ccwlock);
1582 unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) && 1273 if (is_blacklisted(id->ssid, id->devno) &&
1583 (priv->state == DEV_STATE_OFFLINE); 1274 (cdev->private->state == DEV_STATE_OFFLINE)) {
1275 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1276 id->devno);
1277 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1278 }
1584 spin_unlock_irq(cdev->ccwlock); 1279 spin_unlock_irq(cdev->ccwlock);
1585 if (!unreg)
1586 goto out;
1587 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
1588 priv->dev_id.devno);
1589 ccw_device_schedule_sch_unregister(cdev);
1590
1591out:
1592 /* Abort loop in case of pending signal. */ 1280 /* Abort loop in case of pending signal. */
1593 if (signal_pending(current)) 1281 if (signal_pending(current))
1594 return -EINTR; 1282 return -EINTR;
@@ -1630,91 +1318,177 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
1630 cdev->private->state = DEV_STATE_NOT_OPER; 1318 cdev->private->state = DEV_STATE_NOT_OPER;
1631} 1319}
1632 1320
1633static int io_subchannel_sch_event(struct subchannel *sch, int slow) 1321enum io_sch_action {
1322 IO_SCH_UNREG,
1323 IO_SCH_ORPH_UNREG,
1324 IO_SCH_ATTACH,
1325 IO_SCH_UNREG_ATTACH,
1326 IO_SCH_ORPH_ATTACH,
1327 IO_SCH_REPROBE,
1328 IO_SCH_VERIFY,
1329 IO_SCH_DISC,
1330 IO_SCH_NOP,
1331};
1332
1333static enum io_sch_action sch_get_action(struct subchannel *sch)
1634{ 1334{
1635 int event, ret, disc;
1636 unsigned long flags;
1637 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
1638 struct ccw_device *cdev; 1335 struct ccw_device *cdev;
1639 1336
1640 spin_lock_irqsave(sch->lock, flags);
1641 cdev = sch_get_cdev(sch); 1337 cdev = sch_get_cdev(sch);
1642 disc = device_is_disconnected(cdev); 1338 if (cio_update_schib(sch)) {
1643 if (disc && slow) { 1339 /* Not operational. */
1644 /* Disconnected devices are evaluated directly only.*/ 1340 if (!cdev)
1645 spin_unlock_irqrestore(sch->lock, flags); 1341 return IO_SCH_UNREG;
1646 return 0; 1342 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1343 return IO_SCH_UNREG;
1344 return IO_SCH_ORPH_UNREG;
1647 } 1345 }
1648 /* No interrupt after machine check - kill pending timers. */ 1346 /* Operational. */
1649 if (cdev) 1347 if (!cdev)
1650 ccw_device_set_timeout(cdev, 0); 1348 return IO_SCH_ATTACH;
1651 if (!disc && !slow) { 1349 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1652 /* Non-disconnected devices are evaluated on the slow path. */ 1350 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1653 spin_unlock_irqrestore(sch->lock, flags); 1351 return IO_SCH_UNREG_ATTACH;
1654 return -EAGAIN; 1352 return IO_SCH_ORPH_ATTACH;
1655 } 1353 }
1656 event = io_subchannel_get_status(sch); 1354 if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1657 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", 1355 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
1658 sch->schid.ssid, sch->schid.sch_no, event, 1356 return IO_SCH_UNREG;
1659 disc ? "disconnected" : "normal", 1357 return IO_SCH_DISC;
1660 slow ? "slow" : "fast"); 1358 }
1661 /* Analyze subchannel status. */ 1359 if (device_is_disconnected(cdev))
1662 action = NONE; 1360 return IO_SCH_REPROBE;
1663 switch (event) { 1361 if (cdev->online)
1664 case CIO_NO_PATH: 1362 return IO_SCH_VERIFY;
1665 if (disc) { 1363 return IO_SCH_NOP;
1666 /* Check if paths have become available. */ 1364}
1667 action = REPROBE; 1365
1668 break; 1366/**
1367 * io_subchannel_sch_event - process subchannel event
1368 * @sch: subchannel
1369 * @process: non-zero if function is called in process context
1370 *
1371 * An unspecified event occurred for this subchannel. Adjust data according
1372 * to the current operational state of the subchannel and device. Return
1373 * zero when the event has been handled sufficiently or -EAGAIN when this
1374 * function should be called again in process context.
1375 */
1376static int io_subchannel_sch_event(struct subchannel *sch, int process)
1377{
1378 unsigned long flags;
1379 struct ccw_device *cdev;
1380 struct ccw_dev_id dev_id;
1381 enum io_sch_action action;
1382 int rc = -EAGAIN;
1383
1384 spin_lock_irqsave(sch->lock, flags);
1385 if (!device_is_registered(&sch->dev))
1386 goto out_unlock;
1387 if (work_pending(&sch->todo_work))
1388 goto out_unlock;
1389 cdev = sch_get_cdev(sch);
1390 if (cdev && work_pending(&cdev->private->todo_work))
1391 goto out_unlock;
1392 action = sch_get_action(sch);
1393 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1394 sch->schid.ssid, sch->schid.sch_no, process,
1395 action);
1396 /* Perform immediate actions while holding the lock. */
1397 switch (action) {
1398 case IO_SCH_REPROBE:
1399 /* Trigger device recognition. */
1400 ccw_device_trigger_reprobe(cdev);
1401 rc = 0;
1402 goto out_unlock;
1403 case IO_SCH_VERIFY:
1404 if (cdev->private->flags.resuming == 1) {
1405 if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
1406 ccw_device_set_notoper(cdev);
1407 break;
1408 }
1669 } 1409 }
1670 /* fall through */ 1410 /* Trigger path verification. */
1671 case CIO_GONE: 1411 io_subchannel_verify(sch);
1672 /* Ask driver what to do with device. */ 1412 rc = 0;
1673 if (io_subchannel_notify(sch, event)) 1413 goto out_unlock;
1674 action = DISC; 1414 case IO_SCH_DISC:
1675 else 1415 ccw_device_set_disconnected(cdev);
1676 action = UNREGISTER; 1416 rc = 0;
1417 goto out_unlock;
1418 case IO_SCH_ORPH_UNREG:
1419 case IO_SCH_ORPH_ATTACH:
1420 ccw_device_set_disconnected(cdev);
1677 break; 1421 break;
1678 case CIO_REVALIDATE: 1422 case IO_SCH_UNREG_ATTACH:
1679 /* Device will be removed, so no notify necessary. */ 1423 case IO_SCH_UNREG:
1680 if (disc) 1424 if (cdev)
1681 /* Reprobe because immediate unregister might block. */ 1425 ccw_device_set_notoper(cdev);
1682 action = REPROBE;
1683 else
1684 action = UNREGISTER_PROBE;
1685 break; 1426 break;
1686 case CIO_OPER: 1427 case IO_SCH_NOP:
1687 if (disc) 1428 rc = 0;
1688 /* Get device operational again. */ 1429 goto out_unlock;
1689 action = REPROBE; 1430 default:
1690 break; 1431 break;
1691 } 1432 }
1692 /* Perform action. */ 1433 spin_unlock_irqrestore(sch->lock, flags);
1693 ret = 0; 1434 /* All other actions require process context. */
1435 if (!process)
1436 goto out;
1437 /* Handle attached ccw device. */
1694 switch (action) { 1438 switch (action) {
1695 case UNREGISTER: 1439 case IO_SCH_ORPH_UNREG:
1696 case UNREGISTER_PROBE: 1440 case IO_SCH_ORPH_ATTACH:
1697 ccw_device_set_notoper(cdev); 1441 /* Move ccw device to orphanage. */
1698 /* Unregister device (will use subchannel lock). */ 1442 rc = ccw_device_move_to_orph(cdev);
1699 spin_unlock_irqrestore(sch->lock, flags); 1443 if (rc)
1700 css_sch_device_unregister(sch); 1444 goto out;
1701 spin_lock_irqsave(sch->lock, flags);
1702 break; 1445 break;
1703 case REPROBE: 1446 case IO_SCH_UNREG_ATTACH:
1704 ccw_device_trigger_reprobe(cdev); 1447 /* Unregister ccw device. */
1448 if (!cdev->private->flags.resuming)
1449 ccw_device_unregister(cdev);
1705 break; 1450 break;
1706 case DISC: 1451 default:
1707 ccw_device_set_disconnected(cdev); 1452 break;
1453 }
1454 /* Handle subchannel. */
1455 switch (action) {
1456 case IO_SCH_ORPH_UNREG:
1457 case IO_SCH_UNREG:
1458 if (!cdev || !cdev->private->flags.resuming)
1459 css_sch_device_unregister(sch);
1460 break;
1461 case IO_SCH_ORPH_ATTACH:
1462 case IO_SCH_UNREG_ATTACH:
1463 case IO_SCH_ATTACH:
1464 dev_id.ssid = sch->schid.ssid;
1465 dev_id.devno = sch->schib.pmcw.dev;
1466 cdev = get_ccwdev_by_dev_id(&dev_id);
1467 if (!cdev) {
1468 sch_create_and_recog_new_device(sch);
1469 break;
1470 }
1471 rc = ccw_device_move_to_sch(cdev, sch);
1472 if (rc) {
1473 /* Release reference from get_ccwdev_by_dev_id() */
1474 put_device(&cdev->dev);
1475 goto out;
1476 }
1477 spin_lock_irqsave(sch->lock, flags);
1478 ccw_device_trigger_reprobe(cdev);
1479 spin_unlock_irqrestore(sch->lock, flags);
1480 /* Release reference from get_ccwdev_by_dev_id() */
1481 put_device(&cdev->dev);
1708 break; 1482 break;
1709 default: 1483 default:
1710 break; 1484 break;
1711 } 1485 }
1712 spin_unlock_irqrestore(sch->lock, flags); 1486 return 0;
1713 /* Probe if necessary. */
1714 if (action == UNREGISTER_PROBE)
1715 ret = css_probe_device(sch->schid);
1716 1487
1717 return ret; 1488out_unlock:
1489 spin_unlock_irqrestore(sch->lock, flags);
1490out:
1491 return rc;
1718} 1492}
1719 1493
1720#ifdef CONFIG_CCW_CONSOLE 1494#ifdef CONFIG_CCW_CONSOLE
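Note: for orientation, the decision logic of sch_get_action() combined with the handling in io_subchannel_sch_event() condenses to the following table ("driver gives up" means ccw_device_notify() did not return NOTIFY_OK; this is a restatement, not new behavior):

/*
 * subchannel gone,   no cdev            -> IO_SCH_UNREG
 * subchannel gone,   driver gives up    -> IO_SCH_UNREG
 * subchannel gone,   driver keeps cdev  -> IO_SCH_ORPH_UNREG
 * operational,       no cdev            -> IO_SCH_ATTACH
 * devno changed,     driver gives up    -> IO_SCH_UNREG_ATTACH
 * devno changed,     driver keeps cdev  -> IO_SCH_ORPH_ATTACH
 * no path usable,    driver gives up    -> IO_SCH_UNREG
 * no path usable,    driver keeps cdev  -> IO_SCH_DISC
 * cdev disconnected                     -> IO_SCH_REPROBE
 * cdev online                           -> IO_SCH_VERIFY
 * otherwise                             -> IO_SCH_NOP
 *
 * REPROBE, VERIFY, DISC and NOP complete under the subchannel lock;
 * the ORPH_*, UNREG* and ATTACH actions need process context and make
 * the function return -EAGAIN when invoked with process == 0.
 */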
@@ -1744,10 +1518,8 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
1744 sch->driver = &io_subchannel_driver; 1518 sch->driver = &io_subchannel_driver;
1745 /* Initialize the ccw_device structure. */ 1519 /* Initialize the ccw_device structure. */
1746 cdev->dev.parent= &sch->dev; 1520 cdev->dev.parent= &sch->dev;
1747 rc = io_subchannel_recog(cdev, sch); 1521 sch_set_cdev(sch, cdev);
1748 if (rc) 1522 io_subchannel_recog(cdev, sch);
1749 return rc;
1750
1751 /* Now wait for the async. recognition to come to an end. */ 1523 /* Now wait for the async. recognition to come to an end. */
1752 spin_lock_irq(cdev->ccwlock); 1524 spin_lock_irq(cdev->ccwlock);
1753 while (!dev_fsm_final_state(cdev)) 1525 while (!dev_fsm_final_state(cdev))
@@ -1763,7 +1535,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
1763 rc = 0; 1535 rc = 0;
1764out_unlock: 1536out_unlock:
1765 spin_unlock_irq(cdev->ccwlock); 1537 spin_unlock_irq(cdev->ccwlock);
1766 return 0; 1538 return rc;
1767} 1539}
1768 1540
1769struct ccw_device * 1541struct ccw_device *
@@ -1919,7 +1691,7 @@ static int ccw_device_pm_prepare(struct device *dev)
1919{ 1691{
1920 struct ccw_device *cdev = to_ccwdev(dev); 1692 struct ccw_device *cdev = to_ccwdev(dev);
1921 1693
1922 if (work_pending(&cdev->private->kick_work)) 1694 if (work_pending(&cdev->private->todo_work))
1923 return -EAGAIN; 1695 return -EAGAIN;
1924 /* Fail while device is being set online/offline. */ 1696 /* Fail while device is being set online/offline. */
1925 if (atomic_read(&cdev->private->onoff)) 1697 if (atomic_read(&cdev->private->onoff))
@@ -2005,119 +1777,117 @@ static int ccw_device_pm_thaw(struct device *dev)
2005static void __ccw_device_pm_restore(struct ccw_device *cdev) 1777static void __ccw_device_pm_restore(struct ccw_device *cdev)
2006{ 1778{
2007 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1779 struct subchannel *sch = to_subchannel(cdev->dev.parent);
2008 int ret;
2009 1780
2010 if (cio_is_console(sch->schid)) 1781 spin_lock_irq(sch->lock);
2011 goto out; 1782 if (cio_is_console(sch->schid)) {
1783 cio_enable_subchannel(sch, (u32)(addr_t)sch);
1784 goto out_unlock;
1785 }
2012 /* 1786 /*
2013 * While we were sleeping, devices may have gone or become 1787 * While we were sleeping, devices may have gone or become
2014 * available again. Kick re-detection. 1788 * available again. Kick re-detection.
2015 */ 1789 */
2016 spin_lock_irq(sch->lock);
2017 cdev->private->flags.resuming = 1; 1790 cdev->private->flags.resuming = 1;
2018 ret = ccw_device_recognition(cdev); 1791 css_schedule_eval(sch->schid);
1792 spin_unlock_irq(sch->lock);
1793 css_complete_work();
1794
1795 /* cdev may have been moved to a different subchannel. */
1796 sch = to_subchannel(cdev->dev.parent);
1797 spin_lock_irq(sch->lock);
1798 if (cdev->private->state != DEV_STATE_ONLINE &&
1799 cdev->private->state != DEV_STATE_OFFLINE)
1800 goto out_unlock;
1801
1802 ccw_device_recognition(cdev);
2019 spin_unlock_irq(sch->lock); 1803 spin_unlock_irq(sch->lock);
2020 if (ret) {
2021 CIO_MSG_EVENT(0, "Couldn't start recognition for device "
2022 "0.%x.%04x (ret=%d)\n",
2023 cdev->private->dev_id.ssid,
2024 cdev->private->dev_id.devno, ret);
2025 spin_lock_irq(sch->lock);
2026 cdev->private->state = DEV_STATE_DISCONNECTED;
2027 spin_unlock_irq(sch->lock);
2028 /* notify driver after the resume cb */
2029 goto out;
2030 }
2031 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || 1804 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
2032 cdev->private->state == DEV_STATE_DISCONNECTED); 1805 cdev->private->state == DEV_STATE_DISCONNECTED);
1806 spin_lock_irq(sch->lock);
2033 1807
2034out: 1808out_unlock:
2035 cdev->private->flags.resuming = 0; 1809 cdev->private->flags.resuming = 0;
1810 spin_unlock_irq(sch->lock);
2036} 1811}
2037 1812
2038static int resume_handle_boxed(struct ccw_device *cdev) 1813static int resume_handle_boxed(struct ccw_device *cdev)
2039{ 1814{
2040 cdev->private->state = DEV_STATE_BOXED; 1815 cdev->private->state = DEV_STATE_BOXED;
2041 if (ccw_device_notify(cdev, CIO_BOXED)) 1816 if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
2042 return 0; 1817 return 0;
2043 ccw_device_schedule_sch_unregister(cdev); 1818 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
2044 return -ENODEV; 1819 return -ENODEV;
2045} 1820}
2046 1821
2047static int resume_handle_disc(struct ccw_device *cdev) 1822static int resume_handle_disc(struct ccw_device *cdev)
2048{ 1823{
2049 cdev->private->state = DEV_STATE_DISCONNECTED; 1824 cdev->private->state = DEV_STATE_DISCONNECTED;
2050 if (ccw_device_notify(cdev, CIO_GONE)) 1825 if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
2051 return 0; 1826 return 0;
2052 ccw_device_schedule_sch_unregister(cdev); 1827 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
2053 return -ENODEV; 1828 return -ENODEV;
2054} 1829}
2055 1830
2056static int ccw_device_pm_restore(struct device *dev) 1831static int ccw_device_pm_restore(struct device *dev)
2057{ 1832{
2058 struct ccw_device *cdev = to_ccwdev(dev); 1833 struct ccw_device *cdev = to_ccwdev(dev);
2059 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1834 struct subchannel *sch;
2060 int ret = 0, cm_enabled; 1835 int ret = 0;
2061 1836
2062 __ccw_device_pm_restore(cdev); 1837 __ccw_device_pm_restore(cdev);
1838 sch = to_subchannel(cdev->dev.parent);
2063 spin_lock_irq(sch->lock); 1839 spin_lock_irq(sch->lock);
2064 if (cio_is_console(sch->schid)) { 1840 if (cio_is_console(sch->schid))
2065 cio_enable_subchannel(sch, (u32)(addr_t)sch);
2066 spin_unlock_irq(sch->lock);
2067 goto out_restore; 1841 goto out_restore;
2068 } 1842
2069 cdev->private->flags.donotify = 0;
2070 /* check recognition results */ 1843 /* check recognition results */
2071 switch (cdev->private->state) { 1844 switch (cdev->private->state) {
2072 case DEV_STATE_OFFLINE: 1845 case DEV_STATE_OFFLINE:
1846 case DEV_STATE_ONLINE:
1847 cdev->private->flags.donotify = 0;
2073 break; 1848 break;
2074 case DEV_STATE_BOXED: 1849 case DEV_STATE_BOXED:
2075 ret = resume_handle_boxed(cdev); 1850 ret = resume_handle_boxed(cdev);
2076 spin_unlock_irq(sch->lock);
2077 if (ret) 1851 if (ret)
2078 goto out; 1852 goto out_unlock;
2079 goto out_restore; 1853 goto out_restore;
2080 case DEV_STATE_DISCONNECTED:
2081 goto out_disc_unlock;
2082 default: 1854 default:
2083 goto out_unreg_unlock; 1855 ret = resume_handle_disc(cdev);
2084 } 1856 if (ret)
2085 /* check if the device id has changed */ 1857 goto out_unlock;
2086 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1858 goto out_restore;
2087 CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
2088 "changed from %04x to %04x)\n",
2089 sch->schid.ssid, sch->schid.sch_no,
2090 cdev->private->dev_id.devno,
2091 sch->schib.pmcw.dev);
2092 goto out_unreg_unlock;
2093 } 1859 }
2094 /* check if the device type has changed */ 1860 /* check if the device type has changed */
2095 if (!ccw_device_test_sense_data(cdev)) { 1861 if (!ccw_device_test_sense_data(cdev)) {
2096 ccw_device_update_sense_data(cdev); 1862 ccw_device_update_sense_data(cdev);
2097 PREPARE_WORK(&cdev->private->kick_work, 1863 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
2098 ccw_device_do_unbind_bind);
2099 queue_work(ccw_device_work, &cdev->private->kick_work);
2100 ret = -ENODEV; 1864 ret = -ENODEV;
2101 goto out_unlock; 1865 goto out_unlock;
2102 } 1866 }
2103 if (!cdev->online) { 1867 if (!cdev->online)
2104 ret = 0;
2105 goto out_unlock; 1868 goto out_unlock;
2106 }
2107 ret = ccw_device_online(cdev);
2108 if (ret)
2109 goto out_disc_unlock;
2110 1869
2111 cm_enabled = cdev->private->cmb != NULL; 1870 if (ccw_device_online(cdev)) {
1871 ret = resume_handle_disc(cdev);
1872 if (ret)
1873 goto out_unlock;
1874 goto out_restore;
1875 }
2112 spin_unlock_irq(sch->lock); 1876 spin_unlock_irq(sch->lock);
2113
2114 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 1877 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
2115 if (cdev->private->state != DEV_STATE_ONLINE) { 1878 spin_lock_irq(sch->lock);
2116 spin_lock_irq(sch->lock); 1879
2117 goto out_disc_unlock; 1880 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
1881 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1882 ret = -ENODEV;
1883 goto out_unlock;
2118 } 1884 }
2119 if (cm_enabled) { 1885
1886 /* reenable cmf, if needed */
1887 if (cdev->private->cmb) {
1888 spin_unlock_irq(sch->lock);
2120 ret = ccw_set_cmf(cdev, 1); 1889 ret = ccw_set_cmf(cdev, 1);
1890 spin_lock_irq(sch->lock);
2121 if (ret) { 1891 if (ret) {
2122 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " 1892 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
2123 "(rc=%d)\n", cdev->private->dev_id.ssid, 1893 "(rc=%d)\n", cdev->private->dev_id.ssid,
@@ -2127,27 +1897,17 @@ static int ccw_device_pm_restore(struct device *dev)
2127 } 1897 }
2128 1898
2129out_restore: 1899out_restore:
1900 spin_unlock_irq(sch->lock);
2130 if (cdev->online && cdev->drv && cdev->drv->restore) 1901 if (cdev->online && cdev->drv && cdev->drv->restore)
2131 ret = cdev->drv->restore(cdev); 1902 ret = cdev->drv->restore(cdev);
2132out:
2133 return ret; 1903 return ret;
2134 1904
2135out_disc_unlock:
2136 ret = resume_handle_disc(cdev);
2137 spin_unlock_irq(sch->lock);
2138 if (ret)
2139 return ret;
2140 goto out_restore;
2141
2142out_unreg_unlock:
2143 ccw_device_schedule_sch_unregister(cdev);
2144 ret = -ENODEV;
2145out_unlock: 1905out_unlock:
2146 spin_unlock_irq(sch->lock); 1906 spin_unlock_irq(sch->lock);
2147 return ret; 1907 return ret;
2148} 1908}
2149 1909
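In the restore path above, sch->lock is dropped around ccw_set_cmf() and retaken afterwards, presumably because cmf setup can sleep; any state read before the unlock must be revalidated once the lock is held again. A minimal userspace sketch of that pattern, with a pthread mutex standing in for the subchannel lock and every name below illustrative rather than a kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int device_online = 1;

/* Stand-in for a callee that may sleep (allocations, I/O). */
static int blocking_setup(void)
{
	return 0;
}

static int restore_path(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (device_online) {
		pthread_mutex_unlock(&lock);	/* drop across the sleeper */
		ret = blocking_setup();
		pthread_mutex_lock(&lock);	/* retake and revalidate */
		if (!device_online)		/* state may have changed */
			ret = -1;
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("restore rc=%d\n", restore_path());
	return 0;
}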
2150static struct dev_pm_ops ccw_pm_ops = { 1910static const struct dev_pm_ops ccw_pm_ops = {
2151 .prepare = ccw_device_pm_prepare, 1911 .prepare = ccw_device_pm_prepare,
2152 .complete = ccw_device_pm_complete, 1912 .complete = ccw_device_pm_complete,
2153 .freeze = ccw_device_pm_freeze, 1913 .freeze = ccw_device_pm_freeze,
@@ -2205,6 +1965,77 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
2205 return sch->schid; 1965 return sch->schid;
2206} 1966}
2207 1967
1968static void ccw_device_todo(struct work_struct *work)
1969{
1970 struct ccw_device_private *priv;
1971 struct ccw_device *cdev;
1972 struct subchannel *sch;
1973 enum cdev_todo todo;
1974
1975 priv = container_of(work, struct ccw_device_private, todo_work);
1976 cdev = priv->cdev;
1977 sch = to_subchannel(cdev->dev.parent);
1978 /* Find out todo. */
1979 spin_lock_irq(cdev->ccwlock);
1980 todo = priv->todo;
1981 priv->todo = CDEV_TODO_NOTHING;
1982 CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
1983 priv->dev_id.ssid, priv->dev_id.devno, todo);
1984 spin_unlock_irq(cdev->ccwlock);
1985 /* Perform todo. */
1986 switch (todo) {
1987 case CDEV_TODO_ENABLE_CMF:
1988 cmf_reenable(cdev);
1989 break;
1990 case CDEV_TODO_REBIND:
1991 ccw_device_do_unbind_bind(cdev);
1992 break;
1993 case CDEV_TODO_REGISTER:
1994 io_subchannel_register(cdev);
1995 break;
1996 case CDEV_TODO_UNREG_EVAL:
1997 if (!sch_is_pseudo_sch(sch))
1998 css_schedule_eval(sch->schid);
1999 /* fall-through */
2000 case CDEV_TODO_UNREG:
2001 if (sch_is_pseudo_sch(sch))
2002 ccw_device_unregister(cdev);
2003 else
2004 ccw_device_call_sch_unregister(cdev);
2005 break;
2006 default:
2007 break;
2008 }
2009 /* Release workqueue ref. */
2010 put_device(&cdev->dev);
2011}
2012
2013/**
2014 * ccw_device_sched_todo - schedule ccw device operation
2015 * @cdev: ccw device
 2016 * @todo: operation to be performed
2017 *
2018 * Schedule the operation identified by @todo to be performed on the slow path
2019 * workqueue. Do nothing if another operation with higher priority is already
2020 * scheduled. Needs to be called with ccwdev lock held.
2021 */
2022void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2023{
2024 CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2025 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
2026 todo);
2027 if (cdev->private->todo >= todo)
2028 return;
2029 cdev->private->todo = todo;
2030 /* Get workqueue ref. */
2031 if (!get_device(&cdev->dev))
2032 return;
2033 if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
2034 /* Already queued, release workqueue ref. */
2035 put_device(&cdev->dev);
2036 }
2037}
2038
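The kernel-doc above pins down the coalescing rule: a scheduled todo is only replaced by a higher-priority one, which the single ">=" comparison implements provided enum cdev_todo is declared in ascending priority (an assumption here; the enum itself lives in io_sch.h, outside this hunk). A compilable sketch of just that rule:

#include <stdio.h>

/* Order assumed ascending in priority, as the ">=" test implies. */
enum cdev_todo {
	CDEV_TODO_NOTHING,
	CDEV_TODO_ENABLE_CMF,
	CDEV_TODO_REBIND,
	CDEV_TODO_REGISTER,
	CDEV_TODO_UNREG_EVAL,
	CDEV_TODO_UNREG,
};

static enum cdev_todo pending = CDEV_TODO_NOTHING;

static void sched_todo(enum cdev_todo todo)
{
	if (pending >= todo)	/* weaker or equal request is dropped */
		return;
	pending = todo;		/* stronger request supersedes */
}

int main(void)
{
	sched_todo(CDEV_TODO_UNREG);
	sched_todo(CDEV_TODO_REBIND);	/* ignored: UNREG already pending */
	printf("pending=%d (5 == CDEV_TODO_UNREG)\n", pending);
	return 0;
}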
2208MODULE_LICENSE("GPL"); 2039MODULE_LICENSE("GPL");
2209EXPORT_SYMBOL(ccw_device_set_online); 2040EXPORT_SYMBOL(ccw_device_set_online);
2210EXPORT_SYMBOL(ccw_device_set_offline); 2041EXPORT_SYMBOL(ccw_device_set_offline);
@@ -2212,5 +2043,4 @@ EXPORT_SYMBOL(ccw_driver_register);
2212EXPORT_SYMBOL(ccw_driver_unregister); 2043EXPORT_SYMBOL(ccw_driver_unregister);
2213EXPORT_SYMBOL(get_ccwdev_by_busid); 2044EXPORT_SYMBOL(get_ccwdev_by_busid);
2214EXPORT_SYMBOL(ccw_bus_type); 2045EXPORT_SYMBOL(ccw_bus_type);
2215EXPORT_SYMBOL(ccw_device_work);
2216EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); 2046EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 246c6482842c..379de2d1ec49 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -4,7 +4,7 @@
4#include <asm/ccwdev.h> 4#include <asm/ccwdev.h>
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <linux/wait.h> 6#include <linux/wait.h>
7 7#include <linux/notifier.h>
8#include "io_sch.h" 8#include "io_sch.h"
9 9
10/* 10/*
@@ -21,7 +21,6 @@ enum dev_state {
21 DEV_STATE_DISBAND_PGID, 21 DEV_STATE_DISBAND_PGID,
22 DEV_STATE_BOXED, 22 DEV_STATE_BOXED,
23 /* states to wait for i/o completion before doing something */ 23 /* states to wait for i/o completion before doing something */
24 DEV_STATE_CLEAR_VERIFY,
25 DEV_STATE_TIMEOUT_KILL, 24 DEV_STATE_TIMEOUT_KILL,
26 DEV_STATE_QUIESCE, 25 DEV_STATE_QUIESCE,
27 /* special states for devices gone not operational */ 26 /* special states for devices gone not operational */
@@ -29,6 +28,7 @@ enum dev_state {
29 DEV_STATE_DISCONNECTED_SENSE_ID, 28 DEV_STATE_DISCONNECTED_SENSE_ID,
30 DEV_STATE_CMFCHANGE, 29 DEV_STATE_CMFCHANGE,
31 DEV_STATE_CMFUPDATE, 30 DEV_STATE_CMFUPDATE,
31 DEV_STATE_STEAL_LOCK,
32 /* last element! */ 32 /* last element! */
33 NR_DEV_STATES 33 NR_DEV_STATES
34}; 34};
@@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
71 cdev->private->state == DEV_STATE_BOXED); 71 cdev->private->state == DEV_STATE_BOXED);
72} 72}
73 73
74extern struct workqueue_struct *ccw_device_work;
75extern wait_queue_head_t ccw_device_init_wq; 74extern wait_queue_head_t ccw_device_init_wq;
76extern atomic_t ccw_device_init_count; 75extern atomic_t ccw_device_init_count;
77int __init io_subchannel_init(void); 76int __init io_subchannel_init(void);
@@ -81,17 +80,16 @@ void io_subchannel_init_config(struct subchannel *sch);
81 80
82int ccw_device_cancel_halt_clear(struct ccw_device *); 81int ccw_device_cancel_halt_clear(struct ccw_device *);
83 82
84void ccw_device_do_unbind_bind(struct work_struct *);
85void ccw_device_move_to_orphanage(struct work_struct *);
86int ccw_device_is_orphan(struct ccw_device *); 83int ccw_device_is_orphan(struct ccw_device *);
87 84
88int ccw_device_recognition(struct ccw_device *); 85void ccw_device_recognition(struct ccw_device *);
89int ccw_device_online(struct ccw_device *); 86int ccw_device_online(struct ccw_device *);
90int ccw_device_offline(struct ccw_device *); 87int ccw_device_offline(struct ccw_device *);
91void ccw_device_update_sense_data(struct ccw_device *); 88void ccw_device_update_sense_data(struct ccw_device *);
92int ccw_device_test_sense_data(struct ccw_device *); 89int ccw_device_test_sense_data(struct ccw_device *);
93void ccw_device_schedule_sch_unregister(struct ccw_device *); 90void ccw_device_schedule_sch_unregister(struct ccw_device *);
94int ccw_purge_blacklisted(void); 91int ccw_purge_blacklisted(void);
92void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
95 93
96/* Function prototypes for device status and basic sense stuff. */ 94/* Function prototypes for device status and basic sense stuff. */
97void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 95void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -99,24 +97,28 @@ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
99int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *); 97int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
100int ccw_device_do_sense(struct ccw_device *, struct irb *); 98int ccw_device_do_sense(struct ccw_device *, struct irb *);
101 99
 100/* Function prototypes for internal request handling. */
101int lpm_adjust(int lpm, int mask);
102void ccw_request_start(struct ccw_device *);
103int ccw_request_cancel(struct ccw_device *cdev);
104void ccw_request_handler(struct ccw_device *cdev);
105void ccw_request_timeout(struct ccw_device *cdev);
106void ccw_request_notoper(struct ccw_device *cdev);
107
102/* Function prototypes for sense id stuff. */ 108/* Function prototypes for sense id stuff. */
103void ccw_device_sense_id_start(struct ccw_device *); 109void ccw_device_sense_id_start(struct ccw_device *);
104void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
105void ccw_device_sense_id_done(struct ccw_device *, int); 110void ccw_device_sense_id_done(struct ccw_device *, int);
106 111
107/* Function prototypes for path grouping stuff. */ 112/* Function prototypes for path grouping stuff. */
108void ccw_device_sense_pgid_start(struct ccw_device *);
109void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
110void ccw_device_sense_pgid_done(struct ccw_device *, int);
111
112void ccw_device_verify_start(struct ccw_device *); 113void ccw_device_verify_start(struct ccw_device *);
113void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
114void ccw_device_verify_done(struct ccw_device *, int); 114void ccw_device_verify_done(struct ccw_device *, int);
115 115
116void ccw_device_disband_start(struct ccw_device *); 116void ccw_device_disband_start(struct ccw_device *);
117void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
118void ccw_device_disband_done(struct ccw_device *, int); 117void ccw_device_disband_done(struct ccw_device *, int);
119 118
119void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
120void ccw_device_stlck_done(struct ccw_device *, void *, int);
121
120int ccw_device_call_handler(struct ccw_device *); 122int ccw_device_call_handler(struct ccw_device *);
121 123
122int ccw_device_stlck(struct ccw_device *); 124int ccw_device_stlck(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index b9613d7df9ef..c9b852647f01 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -45,7 +45,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
45 sch = to_subchannel(cdev->dev.parent); 45 sch = to_subchannel(cdev->dev.parent);
46 private = to_io_private(sch); 46 private = to_io_private(sch);
47 orb = &private->orb; 47 orb = &private->orb;
48 cc = stsch(sch->schid, &schib); 48 cc = stsch_err(sch->schid, &schib);
49 49
50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
51 "device information:\n", get_clock()); 51 "device information:\n", get_clock());
@@ -229,8 +229,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
229 229
230 sch = to_subchannel(cdev->dev.parent); 230 sch = to_subchannel(cdev->dev.parent);
231 231
232 ccw_device_set_timeout(cdev, 0); 232 if (cio_disable_subchannel(sch))
233 cio_disable_subchannel(sch); 233 state = DEV_STATE_NOT_OPER;
234 /* 234 /*
235 * Now that we tried recognition, we have performed device selection 235 * Now that we tried recognition, we have performed device selection
236 * through ssch() and the path information is up to date. 236 * through ssch() and the path information is up to date.
@@ -263,22 +263,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
263 } 263 }
264 switch (state) { 264 switch (state) {
265 case DEV_STATE_NOT_OPER: 265 case DEV_STATE_NOT_OPER:
266 CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
267 "subchannel 0.%x.%04x\n",
268 cdev->private->dev_id.devno,
269 sch->schid.ssid, sch->schid.sch_no);
270 break; 266 break;
271 case DEV_STATE_OFFLINE: 267 case DEV_STATE_OFFLINE:
272 if (!cdev->online) { 268 if (!cdev->online) {
273 ccw_device_update_sense_data(cdev); 269 ccw_device_update_sense_data(cdev);
274 /* Issue device info message. */
275 CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
276 "CU Type/Mod = %04X/%02X, Dev Type/Mod "
277 "= %04X/%02X\n",
278 cdev->private->dev_id.ssid,
279 cdev->private->dev_id.devno,
280 cdev->id.cu_type, cdev->id.cu_model,
281 cdev->id.dev_type, cdev->id.dev_model);
282 break; 270 break;
283 } 271 }
284 cdev->private->state = DEV_STATE_OFFLINE; 272 cdev->private->state = DEV_STATE_OFFLINE;
@@ -289,16 +277,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
289 wake_up(&cdev->private->wait_q); 277 wake_up(&cdev->private->wait_q);
290 } else { 278 } else {
291 ccw_device_update_sense_data(cdev); 279 ccw_device_update_sense_data(cdev);
292 PREPARE_WORK(&cdev->private->kick_work, 280 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
293 ccw_device_do_unbind_bind);
294 queue_work(ccw_device_work, &cdev->private->kick_work);
295 } 281 }
296 return; 282 return;
297 case DEV_STATE_BOXED: 283 case DEV_STATE_BOXED:
298 CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
299 " subchannel 0.%x.%04x\n",
300 cdev->private->dev_id.devno,
301 sch->schid.ssid, sch->schid.sch_no);
302 if (cdev->id.cu_type != 0) { /* device was recognized before */ 284 if (cdev->id.cu_type != 0) { /* device was recognized before */
303 cdev->private->flags.recog_done = 1; 285 cdev->private->flags.recog_done = 1;
304 cdev->private->state = DEV_STATE_BOXED; 286 cdev->private->state = DEV_STATE_BOXED;
@@ -331,40 +313,50 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
331 } 313 }
332} 314}
333 315
316/**
317 * ccw_device_notify() - inform the device's driver about an event
 318 * @cdev: device for which an event occurred
319 * @event: event that occurred
320 *
321 * Returns:
322 * -%EINVAL if the device is offline or has no driver.
323 * -%EOPNOTSUPP if the device's driver has no notifier registered.
324 * %NOTIFY_OK if the driver wants to keep the device.
325 * %NOTIFY_BAD if the driver doesn't want to keep the device.
326 */
334int ccw_device_notify(struct ccw_device *cdev, int event) 327int ccw_device_notify(struct ccw_device *cdev, int event)
335{ 328{
329 int ret = -EINVAL;
330
336 if (!cdev->drv) 331 if (!cdev->drv)
337 return 0; 332 goto out;
338 if (!cdev->online) 333 if (!cdev->online)
339 return 0; 334 goto out;
340 CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", 335 CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
341 cdev->private->dev_id.ssid, cdev->private->dev_id.devno, 336 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
342 event); 337 event);
343 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 338 if (!cdev->drv->notify) {
344} 339 ret = -EOPNOTSUPP;
345 340 goto out;
346static void cmf_reenable_delayed(struct work_struct *work) 341 }
347{ 342 if (cdev->drv->notify(cdev, event))
348 struct ccw_device_private *priv; 343 ret = NOTIFY_OK;
349 struct ccw_device *cdev; 344 else
350 345 ret = NOTIFY_BAD;
351 priv = container_of(work, struct ccw_device_private, kick_work); 346out:
352 cdev = priv->cdev; 347 return ret;
353 cmf_reenable(cdev);
354} 348}
355 349
356static void ccw_device_oper_notify(struct ccw_device *cdev) 350static void ccw_device_oper_notify(struct ccw_device *cdev)
357{ 351{
358 if (ccw_device_notify(cdev, CIO_OPER)) { 352 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
359 /* Reenable channel measurements, if needed. */ 353 /* Reenable channel measurements, if needed. */
360 PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed); 354 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
361 queue_work(ccw_device_work, &cdev->private->kick_work);
362 return; 355 return;
363 } 356 }
364 /* Driver doesn't want device back. */ 357 /* Driver doesn't want device back. */
365 ccw_device_set_notoper(cdev); 358 ccw_device_set_notoper(cdev);
366 PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind); 359 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
367 queue_work(ccw_device_work, &cdev->private->kick_work);
368} 360}
369 361
370/* 362/*
@@ -391,15 +383,16 @@ ccw_device_done(struct ccw_device *cdev, int state)
391 case DEV_STATE_BOXED: 383 case DEV_STATE_BOXED:
392 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", 384 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
393 cdev->private->dev_id.devno, sch->schid.sch_no); 385 cdev->private->dev_id.devno, sch->schid.sch_no);
394 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) 386 if (cdev->online &&
395 ccw_device_schedule_sch_unregister(cdev); 387 ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
388 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
396 cdev->private->flags.donotify = 0; 389 cdev->private->flags.donotify = 0;
397 break; 390 break;
398 case DEV_STATE_NOT_OPER: 391 case DEV_STATE_NOT_OPER:
399 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", 392 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
400 cdev->private->dev_id.devno, sch->schid.sch_no); 393 cdev->private->dev_id.devno, sch->schid.sch_no);
401 if (!ccw_device_notify(cdev, CIO_GONE)) 394 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
402 ccw_device_schedule_sch_unregister(cdev); 395 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
403 else 396 else
404 ccw_device_set_disconnected(cdev); 397 ccw_device_set_disconnected(cdev);
405 cdev->private->flags.donotify = 0; 398 cdev->private->flags.donotify = 0;
@@ -408,8 +401,8 @@ ccw_device_done(struct ccw_device *cdev, int state)
408 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " 401 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
409 "%04x\n", cdev->private->dev_id.devno, 402 "%04x\n", cdev->private->dev_id.devno,
410 sch->schid.sch_no); 403 sch->schid.sch_no);
411 if (!ccw_device_notify(cdev, CIO_NO_PATH)) 404 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
412 ccw_device_schedule_sch_unregister(cdev); 405 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
413 else 406 else
414 ccw_device_set_disconnected(cdev); 407 ccw_device_set_disconnected(cdev);
415 cdev->private->flags.donotify = 0; 408 cdev->private->flags.donotify = 0;
@@ -425,107 +418,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
425 wake_up(&cdev->private->wait_q); 418 wake_up(&cdev->private->wait_q);
426} 419}
427 420
428static int cmp_pgid(struct pgid *p1, struct pgid *p2)
429{
430 char *c1;
431 char *c2;
432
433 c1 = (char *)p1;
434 c2 = (char *)p2;
435
436 return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
437}
438
439static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
440{
441 int i;
442 int last;
443
444 last = 0;
445 for (i = 0; i < 8; i++) {
446 if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
447 /* No PGID yet */
448 continue;
449 if (cdev->private->pgid[last].inf.ps.state1 ==
450 SNID_STATE1_RESET) {
451 /* First non-zero PGID */
452 last = i;
453 continue;
454 }
455 if (cmp_pgid(&cdev->private->pgid[i],
456 &cdev->private->pgid[last]) == 0)
457 /* Non-conflicting PGIDs */
458 continue;
459
460 /* PGID mismatch, can't pathgroup. */
461 CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
462 "0.%x.%04x, can't pathgroup\n",
463 cdev->private->dev_id.ssid,
464 cdev->private->dev_id.devno);
465 cdev->private->options.pgroup = 0;
466 return;
467 }
468 if (cdev->private->pgid[last].inf.ps.state1 ==
469 SNID_STATE1_RESET)
470 /* No previous pgid found */
471 memcpy(&cdev->private->pgid[0],
472 &channel_subsystems[0]->global_pgid,
473 sizeof(struct pgid));
474 else
475 /* Use existing pgid */
476 memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
477 sizeof(struct pgid));
478}
479
480/*
 481 * Function called from device_pgid.c after the sense path group ID operation has completed.
482 */
483void
484ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
485{
486 struct subchannel *sch;
487
488 sch = to_subchannel(cdev->dev.parent);
489 switch (err) {
490 case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
491 cdev->private->options.pgroup = 0;
492 break;
493 case 0: /* success */
494 case -EACCES: /* partial success, some paths not operational */
495 /* Check if all pgids are equal or 0. */
496 __ccw_device_get_common_pgid(cdev);
497 break;
498 case -ETIME: /* Sense path group id stopped by timeout. */
499 case -EUSERS: /* device is reserved for someone else. */
500 ccw_device_done(cdev, DEV_STATE_BOXED);
501 return;
502 default:
503 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
504 return;
505 }
506 /* Start Path Group verification. */
507 cdev->private->state = DEV_STATE_VERIFY;
508 cdev->private->flags.doverify = 0;
509 ccw_device_verify_start(cdev);
510}
511
512/* 421/*
513 * Start device recognition. 422 * Start device recognition.
514 */ 423 */
515int 424void ccw_device_recognition(struct ccw_device *cdev)
516ccw_device_recognition(struct ccw_device *cdev)
517{ 425{
518 struct subchannel *sch; 426 struct subchannel *sch = to_subchannel(cdev->dev.parent);
519 int ret;
520
521 sch = to_subchannel(cdev->dev.parent);
522 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
523 if (ret != 0)
524 /* Couldn't enable the subchannel for i/o. Sick device. */
525 return ret;
526
527 /* After 60s the device recognition is considered to have failed. */
528 ccw_device_set_timeout(cdev, 60*HZ);
529 427
530 /* 428 /*
531 * We used to start here with a sense pgid to find out whether a device 429 * We used to start here with a sense pgid to find out whether a device
@@ -537,32 +435,33 @@ ccw_device_recognition(struct ccw_device *cdev)
537 */ 435 */
538 cdev->private->flags.recog_done = 0; 436 cdev->private->flags.recog_done = 0;
539 cdev->private->state = DEV_STATE_SENSE_ID; 437 cdev->private->state = DEV_STATE_SENSE_ID;
438 if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
439 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
440 return;
441 }
540 ccw_device_sense_id_start(cdev); 442 ccw_device_sense_id_start(cdev);
541 return 0;
542} 443}
543 444
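ccw_device_recognition() is now void: the state is committed before the subchannel is enabled, and a failed enable is reported through ccw_device_recog_done() like any other recognition outcome, so callers lose their error path. A toy model of that control flow (all names illustrative):

#include <stdio.h>

enum { ST_SENSE_ID, ST_NOT_OPER };

static int state;
static int enable_fails;	/* test knob standing in for cio_enable_subchannel() */

static void recog_done(int new_state)
{
	state = new_state;	/* single completion path for error and success */
}

static void recognition(void)
{
	state = ST_SENSE_ID;		/* commit the state first ... */
	if (enable_fails) {
		recog_done(ST_NOT_OPER); /* ... so a failed enable reports */
		return;			 /* through the normal done callback */
	}
	/* start the SENSE ID channel program here */
}

int main(void)
{
	enable_fails = 1;
	recognition();
	printf("state=%d (1 == ST_NOT_OPER)\n", state);
	return 0;
}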
544/* 445/*
545 * Handle timeout in device recognition. 446 * Handle events for states that use the ccw request infrastructure.
546 */ 447 */
547static void 448static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
548ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
549{ 449{
550 int ret; 450 switch (e) {
551 451 case DEV_EVENT_NOTOPER:
552 ret = ccw_device_cancel_halt_clear(cdev); 452 ccw_request_notoper(cdev);
553 switch (ret) {
554 case 0:
555 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
556 break; 453 break;
557 case -ENODEV: 454 case DEV_EVENT_INTERRUPT:
558 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); 455 ccw_request_handler(cdev);
456 break;
457 case DEV_EVENT_TIMEOUT:
458 ccw_request_timeout(cdev);
559 break; 459 break;
560 default: 460 default:
561 ccw_device_set_timeout(cdev, 3*HZ); 461 break;
562 } 462 }
563} 463}
564 464
565
566void 465void
567ccw_device_verify_done(struct ccw_device *cdev, int err) 466ccw_device_verify_done(struct ccw_device *cdev, int err)
568{ 467{
@@ -571,21 +470,18 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
571 sch = to_subchannel(cdev->dev.parent); 470 sch = to_subchannel(cdev->dev.parent);
572 /* Update schib - pom may have changed. */ 471 /* Update schib - pom may have changed. */
573 if (cio_update_schib(sch)) { 472 if (cio_update_schib(sch)) {
574 cdev->private->flags.donotify = 0; 473 err = -ENODEV;
575 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 474 goto callback;
576 return;
577 } 475 }
578 /* Update lpm with verified path mask. */ 476 /* Update lpm with verified path mask. */
579 sch->lpm = sch->vpm; 477 sch->lpm = sch->vpm;
580 /* Repeat path verification? */ 478 /* Repeat path verification? */
581 if (cdev->private->flags.doverify) { 479 if (cdev->private->flags.doverify) {
582 cdev->private->flags.doverify = 0;
583 ccw_device_verify_start(cdev); 480 ccw_device_verify_start(cdev);
584 return; 481 return;
585 } 482 }
483callback:
586 switch (err) { 484 switch (err) {
587 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
588 cdev->private->options.pgroup = 0;
589 case 0: 485 case 0:
590 ccw_device_done(cdev, DEV_STATE_ONLINE); 486 ccw_device_done(cdev, DEV_STATE_ONLINE);
591 /* Deliver fake irb to device driver, if needed. */ 487 /* Deliver fake irb to device driver, if needed. */
@@ -604,18 +500,20 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
604 } 500 }
605 break; 501 break;
606 case -ETIME: 502 case -ETIME:
503 case -EUSERS:
607 /* Reset oper notify indication after verify error. */ 504 /* Reset oper notify indication after verify error. */
608 cdev->private->flags.donotify = 0; 505 cdev->private->flags.donotify = 0;
609 ccw_device_done(cdev, DEV_STATE_BOXED); 506 ccw_device_done(cdev, DEV_STATE_BOXED);
610 break; 507 break;
508 case -EACCES:
509 /* Reset oper notify indication after verify error. */
510 cdev->private->flags.donotify = 0;
511 ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
512 break;
611 default: 513 default:
612 /* Reset oper notify indication after verify error. */ 514 /* Reset oper notify indication after verify error. */
613 cdev->private->flags.donotify = 0; 515 cdev->private->flags.donotify = 0;
614 if (cdev->online) { 516 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
615 ccw_device_set_timeout(cdev, 0);
616 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
617 } else
618 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
619 break; 517 break;
620 } 518 }
621} 519}
@@ -640,17 +538,9 @@ ccw_device_online(struct ccw_device *cdev)
640 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 538 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
641 return ret; 539 return ret;
642 } 540 }
643 /* Do we want to do path grouping? */ 541 /* Start initial path verification. */
644 if (!cdev->private->options.pgroup) { 542 cdev->private->state = DEV_STATE_VERIFY;
645 /* Start initial path verification. */ 543 ccw_device_verify_start(cdev);
646 cdev->private->state = DEV_STATE_VERIFY;
647 cdev->private->flags.doverify = 0;
648 ccw_device_verify_start(cdev);
649 return 0;
650 }
651 /* Do a SensePGID first. */
652 cdev->private->state = DEV_STATE_SENSE_PGID;
653 ccw_device_sense_pgid_start(cdev);
654 return 0; 544 return 0;
655} 545}
656 546
@@ -666,7 +556,6 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
666 break; 556 break;
667 default: 557 default:
668 cdev->private->flags.donotify = 0; 558 cdev->private->flags.donotify = 0;
669 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
670 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 559 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
671 break; 560 break;
672 } 561 }
@@ -703,7 +592,7 @@ ccw_device_offline(struct ccw_device *cdev)
703 if (cdev->private->state != DEV_STATE_ONLINE) 592 if (cdev->private->state != DEV_STATE_ONLINE)
704 return -EINVAL; 593 return -EINVAL;
705 /* Are we doing path grouping? */ 594 /* Are we doing path grouping? */
706 if (!cdev->private->options.pgroup) { 595 if (!cdev->private->flags.pgroup) {
707 /* No, set state offline immediately. */ 596 /* No, set state offline immediately. */
708 ccw_device_done(cdev, DEV_STATE_OFFLINE); 597 ccw_device_done(cdev, DEV_STATE_OFFLINE);
709 return 0; 598 return 0;
@@ -715,43 +604,13 @@ ccw_device_offline(struct ccw_device *cdev)
715} 604}
716 605
717/* 606/*
718 * Handle timeout in device online/offline process.
719 */
720static void
721ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
722{
723 int ret;
724
725 ret = ccw_device_cancel_halt_clear(cdev);
726 switch (ret) {
727 case 0:
728 ccw_device_done(cdev, DEV_STATE_BOXED);
729 break;
730 case -ENODEV:
731 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
732 break;
733 default:
734 ccw_device_set_timeout(cdev, 3*HZ);
735 }
736}
737
738/*
739 * Handle not oper event in device recognition.
740 */
741static void
742ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
743{
744 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
745}
746
747/*
748 * Handle not operational event in non-special state. 607 * Handle not operational event in non-special state.
749 */ 608 */
750static void ccw_device_generic_notoper(struct ccw_device *cdev, 609static void ccw_device_generic_notoper(struct ccw_device *cdev,
751 enum dev_event dev_event) 610 enum dev_event dev_event)
752{ 611{
753 if (!ccw_device_notify(cdev, CIO_GONE)) 612 if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
754 ccw_device_schedule_sch_unregister(cdev); 613 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
755 else 614 else
756 ccw_device_set_disconnected(cdev); 615 ccw_device_set_disconnected(cdev);
757} 616}
@@ -802,11 +661,27 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
802 } 661 }
803 /* Device is idle, we can do the path verification. */ 662 /* Device is idle, we can do the path verification. */
804 cdev->private->state = DEV_STATE_VERIFY; 663 cdev->private->state = DEV_STATE_VERIFY;
805 cdev->private->flags.doverify = 0;
806 ccw_device_verify_start(cdev); 664 ccw_device_verify_start(cdev);
807} 665}
808 666
809/* 667/*
668 * Handle path verification event in boxed state.
669 */
670static void ccw_device_boxed_verify(struct ccw_device *cdev,
671 enum dev_event dev_event)
672{
673 struct subchannel *sch = to_subchannel(cdev->dev.parent);
674
675 if (cdev->online) {
676 if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
677 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
678 else
679 ccw_device_online_verify(cdev, dev_event);
680 } else
681 css_schedule_eval(sch->schid);
682}
683
684/*
810 * Got an interrupt for a normal io (state online). 685 * Got an interrupt for a normal io (state online).
811 */ 686 */
812static void 687static void
@@ -815,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
815 struct irb *irb; 690 struct irb *irb;
816 int is_cmd; 691 int is_cmd;
817 692
818 irb = (struct irb *) __LC_IRB; 693 irb = (struct irb *)&S390_lowcore.irb;
819 is_cmd = !scsw_is_tm(&irb->scsw); 694 is_cmd = !scsw_is_tm(&irb->scsw);
820 /* Check for unsolicited interrupt. */ 695 /* Check for unsolicited interrupt. */
821 if (!scsw_is_solicited(&irb->scsw)) { 696 if (!scsw_is_solicited(&irb->scsw)) {
@@ -880,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
880{ 755{
881 struct irb *irb; 756 struct irb *irb;
882 757
883 irb = (struct irb *) __LC_IRB; 758 irb = (struct irb *)&S390_lowcore.irb;
884 /* Check for unsolicited interrupt. */ 759 /* Check for unsolicited interrupt. */
885 if (scsw_stctl(&irb->scsw) == 760 if (scsw_stctl(&irb->scsw) ==
886 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 761 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
@@ -904,12 +779,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
904 */ 779 */
905 if (scsw_fctl(&irb->scsw) & 780 if (scsw_fctl(&irb->scsw) &
906 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 781 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
907 /* Retry Basic Sense if requested. */
908 if (cdev->private->flags.intretry) {
909 cdev->private->flags.intretry = 0;
910 ccw_device_do_sense(cdev, irb);
911 return;
912 }
913 cdev->private->flags.dosense = 0; 782 cdev->private->flags.dosense = 0;
914 memset(&cdev->private->irb, 0, sizeof(struct irb)); 783 memset(&cdev->private->irb, 0, sizeof(struct irb));
915 ccw_device_accumulate_irb(cdev, irb); 784 ccw_device_accumulate_irb(cdev, irb);
@@ -933,21 +802,6 @@ call_handler:
933} 802}
934 803
935static void 804static void
936ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
937{
938 struct irb *irb;
939
940 irb = (struct irb *) __LC_IRB;
941 /* Accumulate status. We don't do basic sense. */
942 ccw_device_accumulate_irb(cdev, irb);
943 /* Remember to clear irb to avoid residuals. */
944 memset(&cdev->private->irb, 0, sizeof(struct irb));
945 /* Try to start delayed device verification. */
946 ccw_device_online_verify(cdev, 0);
947 /* Note: Don't call handler for cio initiated clear! */
948}
949
950static void
951ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) 805ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
952{ 806{
953 struct subchannel *sch; 807 struct subchannel *sch;
@@ -1004,32 +858,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
1004} 858}
1005 859
1006static void 860static void
1007ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1008{
1009 struct irb *irb;
1010
1011 switch (dev_event) {
1012 case DEV_EVENT_INTERRUPT:
1013 irb = (struct irb *) __LC_IRB;
1014 /* Check for unsolicited interrupt. */
1015 if ((scsw_stctl(&irb->scsw) ==
1016 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1017 (!scsw_cc(&irb->scsw)))
1018 /* FIXME: we should restart stlck here, but this
1019 * is extremely unlikely ... */
1020 goto out_wakeup;
1021
1022 ccw_device_accumulate_irb(cdev, irb);
1023 /* We don't care about basic sense etc. */
1024 break;
1025 default: /* timeout */
1026 break;
1027 }
1028out_wakeup:
1029 wake_up(&cdev->private->wait_q);
1030}
1031
1032static void
1033ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) 861ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1034{ 862{
1035 struct subchannel *sch; 863 struct subchannel *sch;
@@ -1038,10 +866,6 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1038 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0) 866 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
1039 /* Couldn't enable the subchannel for i/o. Sick device. */ 867 /* Couldn't enable the subchannel for i/o. Sick device. */
1040 return; 868 return;
1041
1042 /* After 60s the device recognition is considered to have failed. */
1043 ccw_device_set_timeout(cdev, 60*HZ);
1044
1045 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID; 869 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1046 ccw_device_sense_id_start(cdev); 870 ccw_device_sense_id_start(cdev);
1047} 871}
@@ -1072,22 +896,20 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
1072 896
1073 /* We should also update ssd info, but this has to wait. */ 897 /* We should also update ssd info, but this has to wait. */
1074 /* Check if this is another device which appeared on the same sch. */ 898 /* Check if this is another device which appeared on the same sch. */
1075 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 899 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
1076 PREPARE_WORK(&cdev->private->kick_work, 900 css_schedule_eval(sch->schid);
1077 ccw_device_move_to_orphanage); 901 else
1078 queue_work(slow_path_wq, &cdev->private->kick_work);
1079 } else
1080 ccw_device_start_id(cdev, 0); 902 ccw_device_start_id(cdev, 0);
1081} 903}
1082 904
1083static void 905static void ccw_device_disabled_irq(struct ccw_device *cdev,
1084ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event) 906 enum dev_event dev_event)
1085{ 907{
1086 struct subchannel *sch; 908 struct subchannel *sch;
1087 909
1088 sch = to_subchannel(cdev->dev.parent); 910 sch = to_subchannel(cdev->dev.parent);
1089 /* 911 /*
1090 * An interrupt in state offline means a previous disable was not 912 * An interrupt in a disabled state means a previous disable was not
1091 * successful - should not happen, but we try to disable again. 913 * successful - should not happen, but we try to disable again.
1092 */ 914 */
1093 cio_disable_subchannel(sch); 915 cio_disable_subchannel(sch);
@@ -1113,10 +935,7 @@ static void
1113ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) 935ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1114{ 936{
1115 ccw_device_set_timeout(cdev, 0); 937 ccw_device_set_timeout(cdev, 0);
1116 if (dev_event == DEV_EVENT_NOTOPER) 938 cdev->private->state = DEV_STATE_NOT_OPER;
1117 cdev->private->state = DEV_STATE_NOT_OPER;
1118 else
1119 cdev->private->state = DEV_STATE_OFFLINE;
1120 wake_up(&cdev->private->wait_q); 939 wake_up(&cdev->private->wait_q);
1121} 940}
1122 941
@@ -1126,17 +945,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1126 int ret; 945 int ret;
1127 946
1128 ret = ccw_device_cancel_halt_clear(cdev); 947 ret = ccw_device_cancel_halt_clear(cdev);
1129 switch (ret) { 948 if (ret == -EBUSY) {
1130 case 0: 949 ccw_device_set_timeout(cdev, HZ/10);
1131 cdev->private->state = DEV_STATE_OFFLINE; 950 } else {
1132 wake_up(&cdev->private->wait_q);
1133 break;
1134 case -ENODEV:
1135 cdev->private->state = DEV_STATE_NOT_OPER; 951 cdev->private->state = DEV_STATE_NOT_OPER;
1136 wake_up(&cdev->private->wait_q); 952 wake_up(&cdev->private->wait_q);
1137 break;
1138 default:
1139 ccw_device_set_timeout(cdev, HZ/10);
1140 } 953 }
1141} 954}
1142 955
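The reworked quiesce timeout collapses the old three-way switch: only -EBUSY from ccw_device_cancel_halt_clear() re-arms the short timer, while any other result, success included, settles the device as not operational and wakes the waiters. A compact model under those assumptions:

#include <errno.h>
#include <stdio.h>

/* Stand-in for ccw_device_cancel_halt_clear(): -EBUSY means "still working". */
static int cancel_halt_clear(int busy)
{
	return busy ? -EBUSY : 0;
}

static void quiesce_timeout(int busy)
{
	if (cancel_halt_clear(busy) == -EBUSY)
		puts("re-arm short timer (HZ/10) and retry");
	else
		puts("settle as not operational and wake waiters");
}

int main(void)
{
	quiesce_timeout(1);
	quiesce_timeout(0);
	return 0;
}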
@@ -1150,50 +963,37 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1150} 963}
1151 964
1152/* 965/*
1153 * Bug operation action.
1154 */
1155static void
1156ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1157{
1158 CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
1159 "0.%x.%04x\n", cdev->private->state, dev_event,
1160 cdev->private->dev_id.ssid,
1161 cdev->private->dev_id.devno);
1162 BUG();
1163}
1164
1165/*
1166 * device statemachine 966 * device statemachine
1167 */ 967 */
1168fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { 968fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1169 [DEV_STATE_NOT_OPER] = { 969 [DEV_STATE_NOT_OPER] = {
1170 [DEV_EVENT_NOTOPER] = ccw_device_nop, 970 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1171 [DEV_EVENT_INTERRUPT] = ccw_device_bug, 971 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1172 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 972 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1173 [DEV_EVENT_VERIFY] = ccw_device_nop, 973 [DEV_EVENT_VERIFY] = ccw_device_nop,
1174 }, 974 },
1175 [DEV_STATE_SENSE_PGID] = { 975 [DEV_STATE_SENSE_PGID] = {
1176 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 976 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1177 [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq, 977 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1178 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 978 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1179 [DEV_EVENT_VERIFY] = ccw_device_nop, 979 [DEV_EVENT_VERIFY] = ccw_device_nop,
1180 }, 980 },
1181 [DEV_STATE_SENSE_ID] = { 981 [DEV_STATE_SENSE_ID] = {
1182 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 982 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1183 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, 983 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1184 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, 984 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1185 [DEV_EVENT_VERIFY] = ccw_device_nop, 985 [DEV_EVENT_VERIFY] = ccw_device_nop,
1186 }, 986 },
1187 [DEV_STATE_OFFLINE] = { 987 [DEV_STATE_OFFLINE] = {
1188 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 988 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1189 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, 989 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1190 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 990 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1191 [DEV_EVENT_VERIFY] = ccw_device_offline_verify, 991 [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
1192 }, 992 },
1193 [DEV_STATE_VERIFY] = { 993 [DEV_STATE_VERIFY] = {
1194 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 994 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1195 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, 995 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1196 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 996 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1197 [DEV_EVENT_VERIFY] = ccw_device_delay_verify, 997 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1198 }, 998 },
1199 [DEV_STATE_ONLINE] = { 999 [DEV_STATE_ONLINE] = {
@@ -1209,24 +1009,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1209 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 1009 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1210 }, 1010 },
1211 [DEV_STATE_DISBAND_PGID] = { 1011 [DEV_STATE_DISBAND_PGID] = {
1212 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1012 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1213 [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq, 1013 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1214 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 1014 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1215 [DEV_EVENT_VERIFY] = ccw_device_nop, 1015 [DEV_EVENT_VERIFY] = ccw_device_nop,
1216 }, 1016 },
1217 [DEV_STATE_BOXED] = { 1017 [DEV_STATE_BOXED] = {
1218 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1018 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1219 [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done, 1019 [DEV_EVENT_INTERRUPT] = ccw_device_nop,
1220 [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
1221 [DEV_EVENT_VERIFY] = ccw_device_nop,
1222 },
1223 /* states to wait for i/o completion before doing something */
1224 [DEV_STATE_CLEAR_VERIFY] = {
1225 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1226 [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
1227 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1020 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1228 [DEV_EVENT_VERIFY] = ccw_device_nop, 1021 [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
1229 }, 1022 },
1023 /* states to wait for i/o completion before doing something */
1230 [DEV_STATE_TIMEOUT_KILL] = { 1024 [DEV_STATE_TIMEOUT_KILL] = {
1231 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1025 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1232 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1026 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
@@ -1243,13 +1037,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1243 [DEV_STATE_DISCONNECTED] = { 1037 [DEV_STATE_DISCONNECTED] = {
1244 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1038 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1245 [DEV_EVENT_INTERRUPT] = ccw_device_start_id, 1039 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1246 [DEV_EVENT_TIMEOUT] = ccw_device_bug, 1040 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1247 [DEV_EVENT_VERIFY] = ccw_device_start_id, 1041 [DEV_EVENT_VERIFY] = ccw_device_start_id,
1248 }, 1042 },
1249 [DEV_STATE_DISCONNECTED_SENSE_ID] = { 1043 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1250 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 1044 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1251 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, 1045 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1252 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, 1046 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1253 [DEV_EVENT_VERIFY] = ccw_device_nop, 1047 [DEV_EVENT_VERIFY] = ccw_device_nop,
1254 }, 1048 },
1255 [DEV_STATE_CMFCHANGE] = { 1049 [DEV_STATE_CMFCHANGE] = {
@@ -1264,6 +1058,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1264 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock, 1058 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
1265 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock, 1059 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
1266 }, 1060 },
1061 [DEV_STATE_STEAL_LOCK] = {
1062 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1063 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1064 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1065 [DEV_EVENT_VERIFY] = ccw_device_nop,
1066 },
1267}; 1067};
1268 1068
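dev_jumptable[][] is indexed by state and event, so dispatching an event is a single indirect call; routing whole rows to ccw_device_request_event() is what funnels the request-driven states into the common ccw_request_* infrastructure. A self-contained sketch of this table-driven dispatch (states, events and handlers here are invented for illustration):

#include <stdio.h>

enum state { ST_IDLE, ST_BUSY, NR_STATES };
enum event { EV_IRQ, EV_TIMEOUT, NR_EVENTS };

typedef void fsm_func_t(enum event ev);

static void nop(enum event ev)
{
}

static void handle(enum event ev)
{
	printf("handled event %d\n", ev);
}

/* Designated initializers, as in dev_jumptable[][] above. */
static fsm_func_t *jumptable[NR_STATES][NR_EVENTS] = {
	[ST_IDLE] = {
		[EV_IRQ]	= nop,
		[EV_TIMEOUT]	= nop,
	},
	[ST_BUSY] = {
		[EV_IRQ]	= handle,
		[EV_TIMEOUT]	= handle,
	},
};

static void fsm_event(enum state st, enum event ev)
{
	jumptable[st][ev](ev);	/* one indirect call replaces a switch */
}

int main(void)
{
	fsm_event(ST_BUSY, EV_IRQ);
	fsm_event(ST_IDLE, EV_TIMEOUT);	/* silently ignored */
	return 0;
}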
1269EXPORT_SYMBOL_GPL(ccw_device_set_timeout); 1069EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 1bdaa614e34f..78a0b43862c5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,40 +1,39 @@
1/* 1/*
2 * drivers/s390/cio/device_id.c 2 * CCW device SENSE ID I/O handling.
3 * 3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 4 * Copyright IBM Corp. 2002,2009
5 * IBM Corporation 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 *
9 * Sense ID functions.
10 */ 8 */
11 9
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h> 10#include <linux/kernel.h>
15 11#include <linux/string.h>
12#include <linux/types.h>
13#include <linux/errno.h>
16#include <asm/ccwdev.h> 14#include <asm/ccwdev.h>
17#include <asm/delay.h> 15#include <asm/setup.h>
18#include <asm/cio.h> 16#include <asm/cio.h>
19#include <asm/lowcore.h>
20#include <asm/diag.h> 17#include <asm/diag.h>
21 18
22#include "cio.h" 19#include "cio.h"
23#include "cio_debug.h" 20#include "cio_debug.h"
24#include "css.h"
25#include "device.h" 21#include "device.h"
26#include "ioasm.h"
27#include "io_sch.h" 22#include "io_sch.h"
28 23
24#define SENSE_ID_RETRIES 256
25#define SENSE_ID_TIMEOUT (10 * HZ)
26#define SENSE_ID_MIN_LEN 4
27#define SENSE_ID_BASIC_LEN 7
28
29/** 29/**
30 * vm_vdev_to_cu_type - Convert vm virtual device into control unit type 30 * diag210_to_senseid - convert diag 0x210 data to sense id information
31 * for certain devices. 31 * @senseid: sense id
32 * @class: virtual device class 32 * @diag: diag 0x210 data
33 * @type: virtual device type
34 * 33 *
35 * Returns control unit type if a match was made or %0xffff otherwise. 34 * Return 0 on success, non-zero otherwise.
36 */ 35 */
37static int vm_vdev_to_cu_type(int class, int type) 36static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
38{ 37{
39 static struct { 38 static struct {
40 int class, type, cu_type; 39 int class, type, cu_type;
@@ -71,253 +70,153 @@ static int vm_vdev_to_cu_type(int class, int type)
71 }; 70 };
72 int i; 71 int i;
73 72
74 for (i = 0; i < ARRAY_SIZE(vm_devices); i++) 73 /* Special case for osa devices. */
75 if (class == vm_devices[i].class && type == vm_devices[i].type) 74 if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
76 return vm_devices[i].cu_type; 75 senseid->cu_type = 0x3088;
76 senseid->cu_model = 0x60;
77 senseid->reserved = 0xff;
78 return 0;
79 }
80 for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
81 if (diag->vrdcvcla == vm_devices[i].class &&
82 diag->vrdcvtyp == vm_devices[i].type) {
83 senseid->cu_type = vm_devices[i].cu_type;
84 senseid->reserved = 0xff;
85 return 0;
86 }
87 }
77 88
78 return 0xffff; 89 return -ENODEV;
79} 90}
80 91
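diag210_to_senseid() is essentially a table lookup from the virtual device class/type pair reported by diag 0x210 to a control unit type, with OSA handled as a special case before the table scan. A standalone sketch; the table entries below are hypothetical, since the kernel's vm_devices[] contents are outside this hunk:

#include <stdio.h>

struct vm_device {
	int class, type, cu_type;
};

/* Hypothetical entries; the kernel's vm_devices[] table is larger. */
static const struct vm_device vm_devices[] = {
	{ 0x08, 0x01, 0x3480 },
	{ 0x08, 0x02, 0x3590 },
};

static int lookup_cu_type(int class, int type)
{
	unsigned int i;

	if (class == 0x02 && type == 0x20)	/* OSA special case */
		return 0x3088;
	for (i = 0; i < sizeof(vm_devices) / sizeof(vm_devices[0]); i++)
		if (vm_devices[i].class == class && vm_devices[i].type == type)
			return vm_devices[i].cu_type;
	return -1;				/* unknown: caller gets -ENODEV */
}

int main(void)
{
	printf("cu_type=%#x\n", lookup_cu_type(0x02, 0x20));
	return 0;
}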
81/** 92/**
82 * diag_get_dev_info - retrieve device information via DIAG X'210' 93 * diag_get_dev_info - retrieve device information via diag 0x210
83 * @devno: device number 94 * @cdev: ccw device
84 * @ps: pointer to sense ID data area
85 * 95 *
86 * Returns zero on success, non-zero otherwise. 96 * Returns zero on success, non-zero otherwise.
87 */ 97 */
88static int diag_get_dev_info(u16 devno, struct senseid *ps) 98static int diag210_get_dev_info(struct ccw_device *cdev)
89{ 99{
100 struct ccw_dev_id *dev_id = &cdev->private->dev_id;
101 struct senseid *senseid = &cdev->private->senseid;
90 struct diag210 diag_data; 102 struct diag210 diag_data;
91 int ccode; 103 int rc;
92 104
93 CIO_TRACE_EVENT (4, "VMvdinf"); 105 if (dev_id->ssid != 0)
94 106 return -ENODEV;
95 diag_data = (struct diag210) { 107 memset(&diag_data, 0, sizeof(diag_data));
96 .vrdcdvno = devno, 108 diag_data.vrdcdvno = dev_id->devno;
97 .vrdclen = sizeof (diag_data), 109 diag_data.vrdclen = sizeof(diag_data);
98 }; 110 rc = diag210(&diag_data);
99 111 CIO_TRACE_EVENT(4, "diag210");
100 ccode = diag210 (&diag_data); 112 CIO_HEX_EVENT(4, &rc, sizeof(rc));
101 if ((ccode == 0) || (ccode == 2)) { 113 CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
102 ps->reserved = 0xff; 114 if (rc != 0 && rc != 2)
103 115 goto err_failed;
104 /* Special case for osa devices. */ 116 if (diag210_to_senseid(senseid, &diag_data))
105 if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) { 117 goto err_unknown;
106 ps->cu_type = 0x3088; 118 return 0;
107 ps->cu_model = 0x60; 119
108 return 0; 120err_unknown:
109 } 121 CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
110 ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla, 122 dev_id->ssid, dev_id->devno);
111 diag_data.vrdcvtyp); 123 return -ENODEV;
112 if (ps->cu_type != 0xffff) 124err_failed:
113 return 0; 125 CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
114 } 126 dev_id->ssid, dev_id->devno, rc);
115
116 CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
117 "vdev class : %02X, vdev type : %04X \n ... "
118 "rdev class : %02X, rdev type : %04X, "
119 "rdev model: %02X\n",
120 devno, ccode,
121 diag_data.vrdcvcla, diag_data.vrdcvtyp,
122 diag_data.vrdcrccl, diag_data.vrdccrty,
123 diag_data.vrdccrmd);
124
125 return -ENODEV; 127 return -ENODEV;
126} 128}
127 129
128/* 130/*
129 * Start Sense ID helper function. 131 * Initialize SENSE ID data.
130 * Try to obtain the 'control unit'/'device type' information
131 * associated with the subchannel.
132 */ 132 */
133static int 133static void snsid_init(struct ccw_device *cdev)
134__ccw_device_sense_id_start(struct ccw_device *cdev)
135{
136 struct subchannel *sch;
137 struct ccw1 *ccw;
138 int ret;
139
140 sch = to_subchannel(cdev->dev.parent);
141 /* Setup sense channel program. */
142 ccw = cdev->private->iccws;
143 ccw->cmd_code = CCW_CMD_SENSE_ID;
144 ccw->cda = (__u32) __pa (&cdev->private->senseid);
145 ccw->count = sizeof (struct senseid);
146 ccw->flags = CCW_FLAG_SLI;
147
148 /* Reset device status. */
149 memset(&cdev->private->irb, 0, sizeof(struct irb));
150
151 /* Try on every path. */
152 ret = -ENODEV;
153 while (cdev->private->imask != 0) {
154 cdev->private->senseid.cu_type = 0xFFFF;
155 if ((sch->opm & cdev->private->imask) != 0 &&
156 cdev->private->iretry > 0) {
157 cdev->private->iretry--;
158 /* Reset internal retry indication. */
159 cdev->private->flags.intretry = 0;
160 ret = cio_start (sch, cdev->private->iccws,
161 cdev->private->imask);
162 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
163 if (ret != -EACCES)
164 return ret;
165 }
166 cdev->private->imask >>= 1;
167 cdev->private->iretry = 5;
168 }
-	return ret;
-}
-
-void
-ccw_device_sense_id_start(struct ccw_device *cdev)
 {
-	int ret;
-
-	memset (&cdev->private->senseid, 0, sizeof (struct senseid));
-	cdev->private->imask = 0x80;
-	cdev->private->iretry = 5;
-	ret = __ccw_device_sense_id_start(cdev);
-	if (ret && ret != -EBUSY)
-		ccw_device_sense_id_done(cdev, ret);
+	cdev->private->flags.esid = 0;
+	memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
+	cdev->private->senseid.cu_type = 0xffff;
 }
 
 /*
- * Called from interrupt context to check if a valid answer
- * to Sense ID was received.
+ * Check for complete SENSE ID data.
  */
-static int
-ccw_device_check_sense_id(struct ccw_device *cdev)
+static int snsid_check(struct ccw_device *cdev, void *data)
 {
-	struct subchannel *sch;
-	struct irb *irb;
+	struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+	int len = sizeof(struct senseid) - scsw->count;
 
-	sch = to_subchannel(cdev->dev.parent);
-	irb = &cdev->private->irb;
-
-	/* Check the error cases. */
-	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
-		/* Retry Sense ID if requested. */
-		if (cdev->private->flags.intretry) {
-			cdev->private->flags.intretry = 0;
-			return -EAGAIN;
-		}
-		return -ETIME;
-	}
-	if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
-		/*
-		 * if the device doesn't support the SenseID
-		 * command further retries wouldn't help ...
-		 * NB: We don't check here for intervention required like we
-		 * did before, because tape devices with no tape inserted
-		 * may present this status *in conjunction with* the
-		 * sense id information. So, for intervention required,
-		 * we use the "whack it until it talks" strategy...
-		 */
-		CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
-			      "0.%x.%04x reports cmd reject\n",
-			      cdev->private->dev_id.devno, sch->schid.ssid,
-			      sch->schid.sch_no);
+	/* Check for incomplete SENSE ID data. */
+	if (len < SENSE_ID_MIN_LEN)
+		goto out_restart;
+	if (cdev->private->senseid.cu_type == 0xffff)
+		goto out_restart;
+	/* Check for incompatible SENSE ID data. */
+	if (cdev->private->senseid.reserved != 0xff)
 		return -EOPNOTSUPP;
-	}
-	if (irb->esw.esw0.erw.cons) {
-		CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
-			      "lpum %02X, cnt %02d, sns :"
-			      " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno,
-			      irb->esw.esw0.sublog.lpum,
-			      irb->esw.esw0.erw.scnt,
-			      irb->ecw[0], irb->ecw[1],
-			      irb->ecw[2], irb->ecw[3],
-			      irb->ecw[4], irb->ecw[5],
-			      irb->ecw[6], irb->ecw[7]);
-		return -EAGAIN;
-	}
-	if (irb->scsw.cmd.cc == 3) {
-		u8 lpm;
+	/* Check for extended-identification information. */
+	if (len > SENSE_ID_BASIC_LEN)
+		cdev->private->flags.esid = 1;
+	return 0;
 
-		lpm = to_io_private(sch)->orb.cmd.lpm;
-		if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
-			CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
-				      "on subchannel 0.%x.%04x is "
-				      "'not operational'\n", lpm,
-				      cdev->private->dev_id.devno,
-				      sch->schid.ssid, sch->schid.sch_no);
-		return -EACCES;
-	}
-
-	/* Did we get a proper answer ? */
-	if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
-	    cdev->private->senseid.reserved == 0xFF) {
-		if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
-			cdev->private->flags.esid = 1;
-		return 0; /* Success */
-	}
-
-	/* Hmm, whatever happened, try again. */
-	CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
-		      "subchannel 0.%x.%04x returns status %02X%02X\n",
-		      cdev->private->dev_id.devno, sch->schid.ssid,
-		      sch->schid.sch_no,
-		      irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
+out_restart:
+	snsid_init(cdev);
 	return -EAGAIN;
 }
 
 /*
- * Got interrupt for Sense ID.
+ * Process SENSE ID request result.
  */
-void
-ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
 {
-	struct subchannel *sch;
-	struct irb *irb;
-	int ret;
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct senseid *senseid = &cdev->private->senseid;
+	int vm = 0;
 
-	sch = to_subchannel(cdev->dev.parent);
-	irb = (struct irb *) __LC_IRB;
-	/* Retry sense id, if needed. */
-	if (irb->scsw.cmd.stctl ==
-	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
-			ret = __ccw_device_sense_id_start(cdev);
-			if (ret && ret != -EBUSY)
-				ccw_device_sense_id_done(cdev, ret);
+	if (rc && MACHINE_IS_VM) {
+		/* Try diag 0x210 fallback on z/VM. */
+		snsid_init(cdev);
+		if (diag210_get_dev_info(cdev) == 0) {
+			rc = 0;
+			vm = 1;
 		}
-		return;
 	}
-	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
-		return;
-	ret = ccw_device_check_sense_id(cdev);
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-	switch (ret) {
-	/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */
-	case 0:			/* Sense id succeeded. */
-	case -ETIME:		/* Sense id stopped by timeout. */
-		ccw_device_sense_id_done(cdev, ret);
-		break;
-	case -EACCES:		/* channel is not operational. */
-		sch->lpm &= ~cdev->private->imask;
-		cdev->private->imask >>= 1;
-		cdev->private->iretry = 5;
-		/* fall through. */
-	case -EAGAIN:		/* try again. */
-		ret = __ccw_device_sense_id_start(cdev);
-		if (ret == 0 || ret == -EBUSY)
-			break;
-		/* fall through. */
-	default:		/* Sense ID failed. Try asking VM. */
-		if (MACHINE_IS_VM)
-			ret = diag_get_dev_info(cdev->private->dev_id.devno,
-						&cdev->private->senseid);
-		else
-			/*
-			 * If we can't couldn't identify the device type we
-			 * consider the device "not operational".
-			 */
-			ret = -ENODEV;
+	CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
+		      "%04x/%02x%s\n", id->ssid, id->devno, rc,
+		      senseid->cu_type, senseid->cu_model, senseid->dev_type,
+		      senseid->dev_model, vm ? " (diag210)" : "");
+	ccw_device_sense_id_done(cdev, rc);
+}
 
-		ccw_device_sense_id_done(cdev, ret);
-		break;
-	}
+/**
+ * ccw_device_sense_id_start - perform SENSE ID
+ * @cdev: ccw device
+ *
+ * Execute a SENSE ID channel program on @cdev to update its sense id
+ * information. When finished, call ccw_device_sense_id_done with a
+ * return code specifying the result.
+ */
+void ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	CIO_TRACE_EVENT(4, "snsid");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Data setup. */
+	snsid_init(cdev);
+	/* Channel program setup. */
+	cp->cmd_code = CCW_CMD_SENSE_ID;
+	cp->cda = (u32) (addr_t) &cdev->private->senseid;
+	cp->count = sizeof(struct senseid);
+	cp->flags = CCW_FLAG_SLI;
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->cp = cp;
+	req->timeout = SENSE_ID_TIMEOUT;
+	req->maxretries = SENSE_ID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam & sch->opm;
+	req->check = snsid_check;
+	req->callback = snsid_callback;
+	ccw_request_start(cdev);
 }
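
The rewrite above replaces the interrupt-driven SENSE ID state machine with a declarative setup: the function only builds a channel program and fills in a struct ccw_request, and the generic engine in ccwreq.c (added earlier in this series) drives path selection, retries and timeouts. A minimal sketch of that calling pattern, with hypothetical my_* names and example timeout/retry values:

static void my_callback(struct ccw_device *cdev, void *data, int rc)
{
	/* rc is 0 on success or a final code such as -ETIME or -EACCES. */
}

static void my_start(struct ccw_device *cdev, struct ccw1 *cp)
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->cp = cp;			/* channel program to execute */
	req->timeout = 2 * HZ;		/* per-I/O deadline (example value) */
	req->maxretries = 5;		/* retries per path (example value) */
	req->lpm = 0x80;		/* start with the first channel path */
	req->callback = my_callback;	/* runs once the result is final */
	ccw_request_start(cdev);
}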
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 2d0efee8a290..6da84543dfe9 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -11,6 +11,7 @@
 #include <linux/list.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/completion.h>
 
 #include <asm/ccwdev.h>
 #include <asm/idals.h>
@@ -46,6 +47,7 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
 	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
 	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
 	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
 	return 0;
 }
 
@@ -74,6 +76,7 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
 	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
 	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
 	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
 	return 0;
 }
 
@@ -90,9 +93,34 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
 	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
 	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
 	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
 }
 
 /**
+ * ccw_device_is_pathgroup - determine if paths to this device are grouped
+ * @cdev: ccw device
+ *
+ * Return non-zero if there is a path group, zero otherwise.
+ */
+int ccw_device_is_pathgroup(struct ccw_device *cdev)
+{
+	return cdev->private->flags.pgroup;
+}
+EXPORT_SYMBOL(ccw_device_is_pathgroup);
+
+/**
+ * ccw_device_is_multipath - determine if device is operating in multipath mode
+ * @cdev: ccw device
+ *
+ * Return non-zero if device is operating in multipath mode, zero otherwise.
+ */
+int ccw_device_is_multipath(struct ccw_device *cdev)
+{
+	return cdev->private->flags.mpath;
+}
+EXPORT_SYMBOL(ccw_device_is_multipath);
+
+/**
  * ccw_device_clear() - terminate I/O request processing
  * @cdev: target ccw device
  * @intparm: interruption parameter; value is only used if no I/O is
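
With CCWDEV_DO_MULTIPATH and the two accessors just added, a driver can request multipathing when it sets its device options and query the negotiated state once the device is online. A usage sketch (my_probe is a placeholder; error handling trimmed):

static int my_probe(struct ccw_device *cdev)
{
	int rc;

	rc = ccw_device_set_options(cdev,
				    CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (rc)
		return rc;
	/* Later, after path verification has run: */
	if (ccw_device_is_pathgroup(cdev) && ccw_device_is_multipath(cdev))
		; /* I/O may be started on any path of the group */
	return 0;
}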
@@ -167,8 +195,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 		return -EINVAL;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return -ENODEV;
-	if (cdev->private->state == DEV_STATE_VERIFY ||
-	    cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
+	if (cdev->private->state == DEV_STATE_VERIFY) {
 		/* Remember to fake irb when finished. */
 		if (!cdev->private->flags.fake_irb) {
 			cdev->private->flags.fake_irb = 1;
@@ -478,74 +505,65 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
 	return sch->lpm;
 }
 
-/*
- * Try to break the lock on a boxed device.
- */
-int
-ccw_device_stlck(struct ccw_device *cdev)
-{
-	void *buf, *buf2;
-	unsigned long flags;
-	struct subchannel *sch;
-	int ret;
+struct stlck_data {
+	struct completion done;
+	int rc;
+};
 
-	if (!cdev)
-		return -ENODEV;
+void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
+{
+	struct stlck_data *sdata = data;
 
-	if (cdev->drv && !cdev->private->options.force)
-		return -EINVAL;
+	sdata->rc = rc;
+	complete(&sdata->done);
+}
 
-	sch = to_subchannel(cdev->dev.parent);
-
-	CIO_TRACE_EVENT(2, "stl lock");
-	CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct stlck_data data;
+	u8 *buffer;
+	int rc;
 
-	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
-	if (!buf2) {
-		kfree(buf);
-		return -ENOMEM;
+	/* Check if steal lock operation is valid for this device. */
+	if (cdev->drv) {
+		if (!cdev->private->options.force)
+			return -EINVAL;
 	}
-	spin_lock_irqsave(sch->lock, flags);
-	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
-	if (ret)
-		goto out_unlock;
-	/*
-	 * Setup ccw. We chain an unconditional reserve and a release so we
-	 * only break the lock.
-	 */
-	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
-	cdev->private->iccws[0].cda = (__u32) __pa(buf);
-	cdev->private->iccws[0].count = 32;
-	cdev->private->iccws[0].flags = CCW_FLAG_CC;
-	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
-	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
-	cdev->private->iccws[1].count = 32;
-	cdev->private->iccws[1].flags = 0;
-	ret = cio_start(sch, cdev->private->iccws, 0);
-	if (ret) {
-		cio_disable_subchannel(sch); //FIXME: return code?
+	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+	init_completion(&data.done);
+	data.rc = -EIO;
+	spin_lock_irq(sch->lock);
+	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+	if (rc)
 		goto out_unlock;
+	/* Perform operation. */
+	cdev->private->state = DEV_STATE_STEAL_LOCK;
+	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+	spin_unlock_irq(sch->lock);
+	/* Wait for operation to finish. */
+	if (wait_for_completion_interruptible(&data.done)) {
+		/* Got a signal. */
+		spin_lock_irq(sch->lock);
+		ccw_request_cancel(cdev);
+		spin_unlock_irq(sch->lock);
+		wait_for_completion(&data.done);
 	}
-	cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
-	spin_unlock_irqrestore(sch->lock, flags);
-	wait_event(cdev->private->wait_q,
-		   cdev->private->irb.scsw.cmd.actl == 0);
-	spin_lock_irqsave(sch->lock, flags);
-	cio_disable_subchannel(sch); //FIXME: return code?
-	if ((cdev->private->irb.scsw.cmd.dstat !=
-	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
-	    (cdev->private->irb.scsw.cmd.cstat != 0))
-		ret = -EIO;
-	/* Clear irb. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	rc = data.rc;
+	/* Check results. */
+	spin_lock_irq(sch->lock);
+	cio_disable_subchannel(sch);
+	cdev->private->state = DEV_STATE_BOXED;
 out_unlock:
-	kfree(buf);
-	kfree(buf2);
-	spin_unlock_irqrestore(sch->lock, flags);
-	return ret;
+	spin_unlock_irq(sch->lock);
+	kfree(buffer);
+
+	return rc;
 }
 
 void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
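
The reworked ccw_device_stlck() above also swaps the old open-coded wait_event() on IRB state for the standard completion idiom: sleep interruptibly, and if a signal arrives, cancel the request but still wait for the callback, which fires exactly once either way. Reduced to its core (sketch; cancel_my_request() stands in for ccw_request_cancel() taken under the proper lock):

	if (wait_for_completion_interruptible(&data.done)) {
		/* Interrupted by a signal: revoke the request ... */
		cancel_my_request();
		/* ... but the callback still completes exactly once. */
		wait_for_completion(&data.done);
	}
	rc = data.rc;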
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index fc5ca1dd52b3..6facb5499a65 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,594 +1,568 @@
 /*
- * drivers/s390/cio/device_pgid.c
+ * CCW device PGID and path verification I/O handling.
  *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- *			 IBM Corporation
- * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
- *		  Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Path Group ID functions.
+ * Copyright IBM Corp. 2002,2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
  */
 
-#include <linux/module.h>
-#include <linux/init.h>
-
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
-#include <asm/delay.h>
-#include <asm/lowcore.h>
 
 #include "cio.h"
 #include "cio_debug.h"
-#include "css.h"
 #include "device.h"
-#include "ioasm.h"
 #include "io_sch.h"
 
+#define PGID_RETRIES	256
+#define PGID_TIMEOUT	(10 * HZ)
+
 /*
- * Helper function called from interrupt context to decide whether an
- * operation should be tried again.
+ * Process path verification data and report result.
  */
-static int __ccw_device_should_retry(union scsw *scsw)
+static void verify_done(struct ccw_device *cdev, int rc)
 {
-	/* CC is only valid if start function bit is set. */
-	if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
-		return 1;
-	/* No more activity. For sense and set PGID we stubbornly try again. */
-	if (!scsw->cmd.actl)
-		return 1;
-	return 0;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	int mpath = cdev->private->flags.mpath;
+	int pgroup = cdev->private->flags.pgroup;
+
+	if (rc)
+		goto out;
+	/* Ensure consistent multipathing state at device and channel. */
+	if (sch->config.mp != mpath) {
+		sch->config.mp = mpath;
+		rc = cio_commit_config(sch);
+	}
+out:
+	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
+		      "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
+		      sch->vpm);
+	ccw_device_verify_done(cdev, rc);
 }
 
 /*
- * Start Sense Path Group ID helper function. Used in ccw_device_recog
- * and ccw_device_sense_pgid.
+ * Create channel program to perform a NOOP.
  */
-static int
-__ccw_device_sense_pgid_start(struct ccw_device *cdev)
+static void nop_build_cp(struct ccw_device *cdev)
 {
-	struct subchannel *sch;
-	struct ccw1 *ccw;
-	int ret;
-	int i;
-
-	sch = to_subchannel(cdev->dev.parent);
-	/* Return if we already checked on all paths. */
-	if (cdev->private->imask == 0)
-		return (sch->lpm == 0) ? -ENODEV : -EACCES;
-	i = 8 - ffs(cdev->private->imask);
-
-	/* Setup sense path group id channel program. */
-	ccw = cdev->private->iccws;
-	ccw->cmd_code = CCW_CMD_SENSE_PGID;
-	ccw->count = sizeof (struct pgid);
-	ccw->flags = CCW_FLAG_SLI;
-
-	/* Reset device status. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-	/* Try on every path. */
-	ret = -ENODEV;
-	while (cdev->private->imask != 0) {
-		/* Try every path multiple times. */
-		ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
-		if (cdev->private->iretry > 0) {
-			cdev->private->iretry--;
-			/* Reset internal retry indication. */
-			cdev->private->flags.intretry = 0;
-			ret = cio_start (sch, cdev->private->iccws,
-					 cdev->private->imask);
-			/* ret is 0, -EBUSY, -EACCES or -ENODEV */
-			if (ret != -EACCES)
-				return ret;
-			CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
-				      "0.%x.%04x, lpm %02X, became 'not "
-				      "operational'\n",
-				      cdev->private->dev_id.devno,
-				      sch->schid.ssid,
-				      sch->schid.sch_no, cdev->private->imask);
-
-		}
-		cdev->private->imask >>= 1;
-		cdev->private->iretry = 5;
-		i++;
-	}
-
-	return ret;
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	cp->cmd_code = CCW_CMD_NOOP;
+	cp->cda = 0;
+	cp->count = 0;
+	cp->flags = CCW_FLAG_SLI;
+	req->cp = cp;
 }
 
-void
-ccw_device_sense_pgid_start(struct ccw_device *cdev)
+/*
+ * Perform NOOP on a single path.
+ */
+static void nop_do(struct ccw_device *cdev)
 {
-	int ret;
-
-	/* Set a timeout of 60s */
-	ccw_device_set_timeout(cdev, 60*HZ);
-
-	cdev->private->state = DEV_STATE_SENSE_PGID;
-	cdev->private->imask = 0x80;
-	cdev->private->iretry = 5;
-	memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid));
-	ret = __ccw_device_sense_pgid_start(cdev);
-	if (ret && ret != -EBUSY)
-		ccw_device_sense_pgid_done(cdev, ret);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Adjust lpm. */
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+	if (!req->lpm)
+		goto out_nopath;
+	nop_build_cp(cdev);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	verify_done(cdev, sch->vpm ? 0 : -EACCES);
 }
 
 /*
- * Called from interrupt context to check if a valid answer
- * to Sense Path Group ID was received.
+ * Adjust NOOP I/O status.
  */
-static int
-__ccw_device_check_sense_pgid(struct ccw_device *cdev)
+static enum io_status nop_filter(struct ccw_device *cdev, void *data,
+				 struct irb *irb, enum io_status status)
 {
-	struct subchannel *sch;
-	struct irb *irb;
-	int i;
-
-	sch = to_subchannel(cdev->dev.parent);
-	irb = &cdev->private->irb;
-	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
-		/* Retry Sense PGID if requested. */
-		if (cdev->private->flags.intretry) {
-			cdev->private->flags.intretry = 0;
-			return -EAGAIN;
-		}
-		return -ETIME;
-	}
-	if (irb->esw.esw0.erw.cons &&
-	    (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
-		/*
-		 * If the device doesn't support the Sense Path Group ID
-		 * command further retries wouldn't help ...
-		 */
-		return -EOPNOTSUPP;
-	}
-	if (irb->esw.esw0.erw.cons) {
-		CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
-			      "lpum %02X, cnt %02d, sns : "
-			      "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno,
-			      irb->esw.esw0.sublog.lpum,
-			      irb->esw.esw0.erw.scnt,
-			      irb->ecw[0], irb->ecw[1],
-			      irb->ecw[2], irb->ecw[3],
-			      irb->ecw[4], irb->ecw[5],
-			      irb->ecw[6], irb->ecw[7]);
-		return -EAGAIN;
-	}
-	if (irb->scsw.cmd.cc == 3) {
-		u8 lpm;
-
-		lpm = to_io_private(sch)->orb.cmd.lpm;
-		CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
-			      " lpm %02X, became 'not operational'\n",
-			      cdev->private->dev_id.devno, sch->schid.ssid,
-			      sch->schid.sch_no, lpm);
-		return -EACCES;
-	}
-	i = 8 - ffs(cdev->private->imask);
-	if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
-		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
-			      "is reserved by someone else\n",
-			      cdev->private->dev_id.devno, sch->schid.ssid,
-			      sch->schid.sch_no);
-		return -EUSERS;
-	}
-	return 0;
+	/* Only subchannel status might indicate a path error. */
+	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
+		return IO_DONE;
+	return status;
 }
 
 /*
- * Got interrupt for Sense Path Group ID.
+ * Process NOOP request result for a single path.
  */
-void
-ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void nop_callback(struct ccw_device *cdev, void *data, int rc)
 {
-	struct subchannel *sch;
-	struct irb *irb;
-	int ret;
-
-	irb = (struct irb *) __LC_IRB;
-
-	if (irb->scsw.cmd.stctl ==
-	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if (__ccw_device_should_retry(&irb->scsw)) {
-			ret = __ccw_device_sense_pgid_start(cdev);
-			if (ret && ret != -EBUSY)
-				ccw_device_sense_pgid_done(cdev, ret);
-		}
-		return;
-	}
-	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
-		return;
-	sch = to_subchannel(cdev->dev.parent);
-	ret = __ccw_device_check_sense_pgid(cdev);
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-	switch (ret) {
-	/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
-	case -EOPNOTSUPP: /* Sense Path Group ID not supported */
-		ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
-		break;
-	case -ETIME: /* Sense path group id stopped by timeout. */
-		ccw_device_sense_pgid_done(cdev, -ETIME);
-		break;
-	case -EACCES: /* channel is not operational. */
-		sch->lpm &= ~cdev->private->imask;
-		/* Fall through. */
-	case 0: /* Sense Path Group ID successful. */
-		cdev->private->imask >>= 1;
-		cdev->private->iretry = 5;
-		/* Fall through. */
-	case -EAGAIN: /* Try again. */
-		ret = __ccw_device_sense_pgid_start(cdev);
-		if (ret != 0 && ret != -EBUSY)
-			ccw_device_sense_pgid_done(cdev, ret);
-		break;
-	case -EUSERS: /* device is reserved for someone else. */
-		ccw_device_sense_pgid_done(cdev, -EUSERS);
-		break;
-	}
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	if (rc == 0)
+		sch->vpm |= req->lpm;
+	else if (rc != -EACCES)
+		goto err;
+	req->lpm >>= 1;
+	nop_do(cdev);
+	return;
+
+err:
+	verify_done(cdev, rc);
 }
 
 /*
- * Path Group ID helper function.
+ * Create channel program to perform SET PGID on a single path.
  */
-static int
-__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
+static void spid_build_cp(struct ccw_device *cdev, u8 fn)
 {
-	struct subchannel *sch;
-	struct ccw1 *ccw;
-	int ret;
-
-	sch = to_subchannel(cdev->dev.parent);
-
-	/* Setup sense path group id channel program. */
-	cdev->private->pgid[0].inf.fc = func;
-	ccw = cdev->private->iccws;
-	if (cdev->private->flags.pgid_single)
-		cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH;
-	else
-		cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
-	ccw->cmd_code = CCW_CMD_SET_PGID;
-	ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
-	ccw->count = sizeof (struct pgid);
-	ccw->flags = CCW_FLAG_SLI;
-
-	/* Reset device status. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-
-	/* Try multiple times. */
-	ret = -EACCES;
-	if (cdev->private->iretry > 0) {
-		cdev->private->iretry--;
-		/* Reset internal retry indication. */
-		cdev->private->flags.intretry = 0;
-		ret = cio_start (sch, cdev->private->iccws,
-				 cdev->private->imask);
-		/* We expect an interrupt in case of success or busy
-		 * indication. */
-		if ((ret == 0) || (ret == -EBUSY))
-			return ret;
-	}
-	/* PGID command failed on this path. */
-	CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
-		      "0.%x.%04x, lpm %02X, became 'not operational'\n",
-		      cdev->private->dev_id.devno, sch->schid.ssid,
-		      sch->schid.sch_no, cdev->private->imask);
-	return ret;
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+	int i = 8 - ffs(req->lpm);
+	struct pgid *pgid = &cdev->private->pgid[i];
+
+	pgid->inf.fc = fn;
+	cp->cmd_code = CCW_CMD_SET_PGID;
+	cp->cda = (u32) (addr_t) pgid;
+	cp->count = sizeof(*pgid);
+	cp->flags = CCW_FLAG_SLI;
+	req->cp = cp;
 }
 
 /*
- * Helper function to send a nop ccw down a path.
+ * Perform establish/resign SET PGID on a single path.
  */
-static int __ccw_device_do_nop(struct ccw_device *cdev)
+static void spid_do(struct ccw_device *cdev)
 {
-	struct subchannel *sch;
-	struct ccw1 *ccw;
-	int ret;
-
-	sch = to_subchannel(cdev->dev.parent);
-
-	/* Setup nop channel program. */
-	ccw = cdev->private->iccws;
-	ccw->cmd_code = CCW_CMD_NOOP;
-	ccw->cda = 0;
-	ccw->count = 0;
-	ccw->flags = CCW_FLAG_SLI;
-
-	/* Reset device status. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-
-	/* Try multiple times. */
-	ret = -EACCES;
-	if (cdev->private->iretry > 0) {
-		cdev->private->iretry--;
-		/* Reset internal retry indication. */
-		cdev->private->flags.intretry = 0;
-		ret = cio_start (sch, cdev->private->iccws,
-				 cdev->private->imask);
-		/* We expect an interrupt in case of success or busy
-		 * indication. */
-		if ((ret == 0) || (ret == -EBUSY))
-			return ret;
-	}
-	/* nop command failed on this path. */
-	CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
-		      "0.%x.%04x, lpm %02X, became 'not operational'\n",
-		      cdev->private->dev_id.devno, sch->schid.ssid,
-		      sch->schid.sch_no, cdev->private->imask);
-	return ret;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	/* Use next available path that is not already in correct state. */
+	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
+	if (!req->lpm)
+		goto out_nopath;
+	/* Channel program setup. */
+	if (req->lpm & sch->opm)
+		fn = SPID_FUNC_ESTABLISH;
+	else
+		fn = SPID_FUNC_RESIGN;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	verify_done(cdev, sch->vpm ? 0 : -EACCES);
 }
 
+static void verify_start(struct ccw_device *cdev);
 
 /*
- * Called from interrupt context to check if a valid answer
- * to Set Path Group ID was received.
+ * Process SET PGID request result for a single path.
  */
-static int
-__ccw_device_check_pgid(struct ccw_device *cdev)
+static void spid_callback(struct ccw_device *cdev, void *data, int rc)
 {
-	struct subchannel *sch;
-	struct irb *irb;
-
-	sch = to_subchannel(cdev->dev.parent);
-	irb = &cdev->private->irb;
-	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
-		/* Retry Set PGID if requested. */
-		if (cdev->private->flags.intretry) {
-			cdev->private->flags.intretry = 0;
-			return -EAGAIN;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	switch (rc) {
+	case 0:
+		sch->vpm |= req->lpm & sch->opm;
+		break;
+	case -EACCES:
+		break;
+	case -EOPNOTSUPP:
+		if (cdev->private->flags.mpath) {
+			/* Try without multipathing. */
+			cdev->private->flags.mpath = 0;
+			goto out_restart;
 		}
-		return -ETIME;
-	}
-	if (irb->esw.esw0.erw.cons) {
-		if (irb->ecw[0] & SNS0_CMD_REJECT)
-			return -EOPNOTSUPP;
-		/* Hmm, whatever happened, try again. */
-		CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
-			      "cnt %02d, "
-			      "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno,
-			      irb->esw.esw0.erw.scnt,
-			      irb->ecw[0], irb->ecw[1],
-			      irb->ecw[2], irb->ecw[3],
-			      irb->ecw[4], irb->ecw[5],
-			      irb->ecw[6], irb->ecw[7]);
-		return -EAGAIN;
+		/* Try without pathgrouping. */
+		cdev->private->flags.pgroup = 0;
+		goto out_restart;
+	default:
+		goto err;
 	}
-	if (irb->scsw.cmd.cc == 3) {
-		CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
-			      " lpm %02X, became 'not operational'\n",
-			      cdev->private->dev_id.devno, sch->schid.ssid,
-			      sch->schid.sch_no, cdev->private->imask);
-		return -EACCES;
-	}
-	return 0;
+	req->lpm >>= 1;
+	spid_do(cdev);
+	return;
+
+out_restart:
+	verify_start(cdev);
+	return;
+err:
+	verify_done(cdev, rc);
+}
+
+static void spid_start(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = 0x80;
+	req->callback = spid_callback;
+	spid_do(cdev);
+}
+
+static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+{
+	return memcmp((char *) p1 + 1, (char *) p2 + 1,
+		      sizeof(struct pgid) - 1);
 }
 
 /*
- * Called from interrupt context to check the path status after a nop has
- * been send.
+ * Determine pathgroup state from PGID data.
  */
-static int __ccw_device_check_nop(struct ccw_device *cdev)
+static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+			 int *mismatch, int *reserved, int *reset)
 {
-	struct subchannel *sch;
-	struct irb *irb;
-
-	sch = to_subchannel(cdev->dev.parent);
-	irb = &cdev->private->irb;
-	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
-		/* Retry NOP if requested. */
-		if (cdev->private->flags.intretry) {
-			cdev->private->flags.intretry = 0;
-			return -EAGAIN;
+	struct pgid *pgid = &cdev->private->pgid[0];
+	struct pgid *first = NULL;
+	int lpm;
+	int i;
+
+	*mismatch = 0;
+	*reserved = 0;
+	*reset = 0;
+	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
+		if ((cdev->private->pgid_valid_mask & lpm) == 0)
+			continue;
+		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+			*reserved = 1;
+		if (pgid->inf.ps.state1 == SNID_STATE1_RESET) {
+			/* A PGID was reset. */
+			*reset = 1;
+			continue;
 		}
-		return -ETIME;
-	}
-	if (irb->scsw.cmd.cc == 3) {
-		CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
-			      " lpm %02X, became 'not operational'\n",
-			      cdev->private->dev_id.devno, sch->schid.ssid,
-			      sch->schid.sch_no, cdev->private->imask);
-		return -EACCES;
+		if (!first) {
+			first = pgid;
+			continue;
+		}
+		if (pgid_cmp(pgid, first) != 0)
+			*mismatch = 1;
 	}
-	return 0;
+	if (!first)
+		first = &channel_subsystems[0]->global_pgid;
+	*p = first;
 }
 
-static void
-__ccw_device_verify_start(struct ccw_device *cdev)
+static u8 pgid_to_donepm(struct ccw_device *cdev)
 {
-	struct subchannel *sch;
-	__u8 func;
-	int ret;
-
-	sch = to_subchannel(cdev->dev.parent);
-	/* Repeat for all paths. */
-	for (; cdev->private->imask; cdev->private->imask >>= 1,
-			    cdev->private->iretry = 5) {
-		if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
-			/* Path not available, try next. */
-			continue;
-		if (cdev->private->options.pgroup) {
-			if (sch->opm & cdev->private->imask)
-				func = SPID_FUNC_ESTABLISH;
-			else
-				func = SPID_FUNC_RESIGN;
-			ret = __ccw_device_do_pgid(cdev, func);
-		} else
-			ret = __ccw_device_do_nop(cdev);
-		/* We expect an interrupt in case of success or busy
-		 * indication. */
-		if (ret == 0 || ret == -EBUSY)
-			return;
-		/* Permanent path failure, try next. */
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct pgid *pgid;
+	int i;
+	int lpm;
+	u8 donepm = 0;
+
+	/* Set bits for paths which are already in the target state. */
+	for (i = 0; i < 8; i++) {
+		lpm = 0x80 >> i;
+		if ((cdev->private->pgid_valid_mask & lpm) == 0)
+			continue;
+		pgid = &cdev->private->pgid[i];
+		if (sch->opm & lpm) {
+			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+				continue;
+		} else {
+			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
+				continue;
+		}
+		if (cdev->private->flags.mpath) {
+			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
+				continue;
+		} else {
+			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
+				continue;
+		}
+		donepm |= lpm;
 	}
-	/* Done with all paths. */
-	ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
+
+	return donepm;
 }
 
-/*
- * Got interrupt for Set Path Group ID.
- */
-void
-ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
 {
-	struct subchannel *sch;
-	struct irb *irb;
-	int ret;
+	int i;
 
-	irb = (struct irb *) __LC_IRB;
+	for (i = 0; i < 8; i++)
+		memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+}
 
-	if (irb->scsw.cmd.stctl ==
-	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if (__ccw_device_should_retry(&irb->scsw))
-			__ccw_device_verify_start(cdev);
-		return;
+/*
+ * Process SENSE PGID data and report result.
+ */
+static void snid_done(struct ccw_device *cdev, int rc)
+{
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct pgid *pgid;
+	int mismatch = 0;
+	int reserved = 0;
+	int reset = 0;
+	u8 donepm;
+
+	if (rc)
+		goto out;
+	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+	if (reserved)
+		rc = -EUSERS;
+	else if (mismatch)
+		rc = -EOPNOTSUPP;
+	else {
+		donepm = pgid_to_donepm(cdev);
+		sch->vpm = donepm & sch->opm;
+		cdev->private->pgid_todo_mask &= ~donepm;
+		pgid_fill(cdev, pgid);
 	}
-	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
-		return;
-	sch = to_subchannel(cdev->dev.parent);
-	if (cdev->private->options.pgroup)
-		ret = __ccw_device_check_pgid(cdev);
-	else
-		ret = __ccw_device_check_nop(cdev);
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-
-	switch (ret) {
-	/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
+out:
+	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+		      "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid,
+		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+	switch (rc) {
 	case 0:
-		/* Path verification ccw finished successfully, update lpm. */
-		sch->vpm |= sch->opm & cdev->private->imask;
-		/* Go on with next path. */
-		cdev->private->imask >>= 1;
-		cdev->private->iretry = 5;
-		__ccw_device_verify_start(cdev);
+		/* Anything left to do? */
+		if (cdev->private->pgid_todo_mask == 0) {
+			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
+			return;
+		}
+		/* Perform path-grouping. */
+		spid_start(cdev);
 		break;
 	case -EOPNOTSUPP:
-		/*
-		 * One of those strange devices which claim to be able
-		 * to do multipathing but not for Set Path Group ID.
-		 */
-		if (cdev->private->flags.pgid_single)
-			cdev->private->options.pgroup = 0;
-		else
-			cdev->private->flags.pgid_single = 1;
-		/* Retry */
-		sch->vpm = 0;
-		cdev->private->imask = 0x80;
-		cdev->private->iretry = 5;
-		/* fall through. */
-	case -EAGAIN: /* Try again. */
-		__ccw_device_verify_start(cdev);
-		break;
-	case -ETIME: /* Set path group id stopped by timeout. */
-		ccw_device_verify_done(cdev, -ETIME);
-		break;
-	case -EACCES: /* channel is not operational. */
-		cdev->private->imask >>= 1;
-		cdev->private->iretry = 5;
-		__ccw_device_verify_start(cdev);
+		/* Path-grouping not supported. */
+		cdev->private->flags.pgroup = 0;
+		cdev->private->flags.mpath = 0;
+		verify_start(cdev);
 		break;
+	default:
+		verify_done(cdev, rc);
 	}
 }
 
-void
-ccw_device_verify_start(struct ccw_device *cdev)
+/*
+ * Create channel program to perform a SENSE PGID on a single path.
+ */
+static void snid_build_cp(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+	int i = 8 - ffs(req->lpm);
+
+	/* Channel program setup. */
+	cp->cmd_code = CCW_CMD_SENSE_PGID;
+	cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+	cp->count = sizeof(struct pgid);
+	cp->flags = CCW_FLAG_SLI;
+	req->cp = cp;
+}
+
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Adjust lpm if paths are not set in pam. */
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+	if (!req->lpm)
+		goto out_nopath;
+	snid_build_cp(cdev);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+}
 
-	cdev->private->flags.pgid_single = 0;
-	cdev->private->imask = 0x80;
-	cdev->private->iretry = 5;
+/*
+ * Process SENSE PGID request result for single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (rc == 0)
+		cdev->private->pgid_valid_mask |= req->lpm;
+	else if (rc != -EACCES)
+		goto err;
+	req->lpm >>= 1;
+	snid_do(cdev);
+	return;
+
+err:
+	snid_done(cdev, rc);
+}
 
-	/* Start with empty vpm. */
-	sch->vpm = 0;
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw_dev_id *devid = &cdev->private->dev_id;
 
-	/* Get current pam. */
-	if (cio_update_schib(sch)) {
-		ccw_device_verify_done(cdev, -ENODEV);
-		return;
+	sch->vpm = 0;
+	sch->lpm = sch->schib.pmcw.pam;
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = 0x80;
+	if (cdev->private->flags.pgroup) {
+		CIO_TRACE_EVENT(4, "snid");
+		CIO_HEX_EVENT(4, devid, sizeof(*devid));
+		req->callback = snid_callback;
+		snid_do(cdev);
+	} else {
+		CIO_TRACE_EVENT(4, "nop");
+		CIO_HEX_EVENT(4, devid, sizeof(*devid));
+		req->filter = nop_filter;
+		req->callback = nop_callback;
+		nop_do(cdev);
 	}
-	/* After 60s path verification is considered to have failed. */
-	ccw_device_set_timeout(cdev, 60*HZ);
-	__ccw_device_verify_start(cdev);
 }
 
-static void
-__ccw_device_disband_start(struct ccw_device *cdev)
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
 {
-	struct subchannel *sch;
-	int ret;
-
-	sch = to_subchannel(cdev->dev.parent);
-	while (cdev->private->imask != 0) {
-		if (sch->lpm & cdev->private->imask) {
-			ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
-			if (ret == 0)
-				return;
-		}
-		cdev->private->iretry = 5;
-		cdev->private->imask >>= 1;
-	}
-	ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	CIO_TRACE_EVENT(4, "vrfy");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Initialize PGID data. */
+	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	cdev->private->pgid_valid_mask = 0;
+	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+	/*
+	 * Initialize pathgroup and multipath state with target values.
+	 * They may change in the course of path verification.
+	 */
+	cdev->private->flags.pgroup = cdev->private->options.pgroup;
+	cdev->private->flags.mpath = cdev->private->options.mpath;
+	cdev->private->flags.doverify = 0;
+	verify_start(cdev);
 }
 
 /*
- * Got interrupt for Unset Path Group ID.
+ * Process disband SET PGID request result.
  */
-void
-ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
 {
-	struct subchannel *sch;
-	struct irb *irb;
-	int ret;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+
+	if (rc)
+		goto out;
+	/* Ensure consistent multipathing state at device and channel. */
+	cdev->private->flags.mpath = 0;
+	if (sch->config.mp) {
+		sch->config.mp = 0;
+		rc = cio_commit_config(sch);
+	}
+out:
+	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+		      rc);
+	ccw_device_disband_done(cdev, rc);
+}
 
-	irb = (struct irb *) __LC_IRB;
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_TRACE_EVENT(4, "disb");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam & sch->opm;
+	req->callback = disband_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
 
-	if (irb->scsw.cmd.stctl ==
-	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if (__ccw_device_should_retry(&irb->scsw))
-			__ccw_device_disband_start(cdev);
-		return;
-	}
-	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
-		return;
-	sch = to_subchannel(cdev->dev.parent);
-	ret = __ccw_device_check_pgid(cdev);
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-	switch (ret) {
-	/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
-	case 0: /* disband successful. */
-		ccw_device_disband_done(cdev, ret);
-		break;
-	case -EOPNOTSUPP:
-		/*
-		 * One of those strange devices which claim to be able
-		 * to do multipathing but not for Unset Path Group ID.
-		 */
-		cdev->private->flags.pgid_single = 1;
-		/* fall through. */
-	case -EAGAIN: /* Try again. */
-		__ccw_device_disband_start(cdev);
-		break;
-	case -ETIME: /* Set path group id stopped by timeout. */
-		ccw_device_disband_done(cdev, -ETIME);
-		break;
-	case -EACCES: /* channel is not operational. */
-		cdev->private->imask >>= 1;
-		cdev->private->iretry = 5;
-		__ccw_device_disband_start(cdev);
-		break;
-	}
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	cp[0].cmd_code = CCW_CMD_STLCK;
+	cp[0].cda = (u32) (addr_t) buf1;
+	cp[0].count = 32;
+	cp[0].flags = CCW_FLAG_CC;
+	cp[1].cmd_code = CCW_CMD_RELEASE;
+	cp[1].cda = (u32) (addr_t) buf2;
+	cp[1].count = 32;
+	cp[1].flags = 0;
+	req->cp = cp;
 }
 
-void
-ccw_device_disband_start(struct ccw_device *cdev)
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
 {
-	/* After 60s disbanding is considered to have failed. */
-	ccw_device_set_timeout(cdev, 60*HZ);
-
-	cdev->private->flags.pgid_single = 0;
-	cdev->private->iretry = 5;
-	cdev->private->imask = 0x80;
-	__ccw_device_disband_start(cdev);
+	ccw_device_stlck_done(cdev, data, rc);
+}
+
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ * When finished, call ccw_device_stlck_done with a return code specifying the
+ * result.
+ */
+void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
+			    void *buf2)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	CIO_TRACE_EVENT(4, "stlck");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam & sch->opm;
+	req->data = data;
+	req->callback = stlck_callback;
+	stlck_build_cp(cdev, buf1, buf2);
+	ccw_request_start(cdev);
 }
+
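
Path selection in the new device_pgid.c walks the logical path mask from the most significant bit down and skips paths not present in the supplied mask. The lpm_adjust() helper it relies on is defined elsewhere in cio; its behaviour is roughly the following (sketch under that assumption, 0x80 denoting the first path):

static u8 lpm_adjust_sketch(u8 lpm, u8 mask)
{
	/* Advance to the next path that is still allowed by mask. */
	while (lpm && !(lpm & mask))
		lpm >>= 1;
	return lpm;	/* 0 means no usable path is left */
}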
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 5814dbee2410..66d8066ef22a 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -336,9 +336,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
 	sense_ccw->count = SENSE_MAX_COUNT;
 	sense_ccw->flags = CCW_FLAG_SLI;
 
-	/* Reset internal retry indication. */
-	cdev->private->flags.intretry = 0;
-
 	rc = cio_start(sch, sense_ccw, 0xff);
 	if (rc == -ENODEV || rc == -EACCES)
 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 61677dfbdc9b..ca5e9bb9d458 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -163,7 +163,7 @@ void tcw_finalize(struct tcw *tcw, int num_tidaws)
 	/* Add tcat to tccb. */
 	tccb = tcw_get_tccb(tcw);
 	tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
-	memset(tcat, 0, sizeof(tcat));
+	memset(tcat, 0, sizeof(*tcat));
 	/* Calculate tcw input/output count and tcat transport count. */
 	count = calc_dcw_count(tccb);
 	if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tccb_init);
  */
 void tsb_init(struct tsb *tsb)
 {
-	memset(tsb, 0, sizeof(tsb));
+	memset(tsb, 0, sizeof(*tsb));
 }
 EXPORT_SYMBOL(tsb_init);
 
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 0b8f381bd20e..b9ce712a7f25 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,7 +1,10 @@
 #ifndef S390_IO_SCH_H
 #define S390_IO_SCH_H
 
+#include <linux/types.h>
 #include <asm/schid.h>
+#include <asm/ccwdev.h>
+#include "css.h"
 
 /*
  * command-mode operation request block
@@ -68,6 +71,52 @@ struct io_subchannel_private {
 #define MAX_CIWS 8
 
 /*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+	IO_DONE,
+	IO_RUNNING,
+	IO_STATUS_ERROR,
+	IO_PATH_ERROR,
+	IO_REJECTED,
+	IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ */
+struct ccw_request {
+	struct ccw1 *cp;
+	unsigned long timeout;
+	u16 maxretries;
+	u8 lpm;
+	int (*check)(struct ccw_device *, void *);
+	enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+				 enum io_status);
+	void (*callback)(struct ccw_device *, void *, int);
+	void *data;
+	/* These fields are used internally. */
+	u16 mask;
+	u16 retries;
+	int drc;
+	int cancel:1;
+	int done:1;
+} __attribute__((packed));
+
+/*
  * sense-id response buffer layout
  */
 struct senseid {
@@ -82,32 +131,44 @@ struct senseid {
 	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
 } __attribute__ ((packed, aligned(4)));
 
+enum cdev_todo {
+	CDEV_TODO_NOTHING,
+	CDEV_TODO_ENABLE_CMF,
+	CDEV_TODO_REBIND,
+	CDEV_TODO_REGISTER,
+	CDEV_TODO_UNREG,
+	CDEV_TODO_UNREG_EVAL,
+};
+
 struct ccw_device_private {
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int state;		/* device state */
 	atomic_t onoff;
-	unsigned long registered;
 	struct ccw_dev_id dev_id;	/* device id */
 	struct subchannel_id schid;	/* subchannel number */
-	u8 imask;		/* lpm mask for SNID/SID/SPGID */
-	int iretry;		/* retry counter SNID/SID/SPGID */
+	struct ccw_request req;	/* internal I/O request */
+	int iretry;
+	u8 pgid_valid_mask;	/* mask of valid PGIDs */
+	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
 	struct {
 		unsigned int fast:1;	/* post with "channel end" */
 		unsigned int repall:1;	/* report every interrupt status */
 		unsigned int pgroup:1;	/* do path grouping */
 		unsigned int force:1;	/* allow forced online */
+		unsigned int mpath:1;	/* do multipathing */
 	} __attribute__ ((packed)) options;
 	struct {
-		unsigned int pgid_single:1; /* use single path for Set PGID */
 		unsigned int esid:1;	    /* Ext. SenseID supported by HW */
 		unsigned int dosense:1;	    /* delayed SENSE required */
 		unsigned int doverify:1;    /* delayed path verification */
 		unsigned int donotify:1;    /* call notify function */
 		unsigned int recog_done:1;  /* dev. recog. complete */
 		unsigned int fake_irb:1;    /* deliver faked irb */
-		unsigned int intretry:1;    /* retry internal operation */
 		unsigned int resuming:1;    /* recognition while resume */
+		unsigned int pgroup:1;	    /* pathgroup is set up */
+		unsigned int mpath:1;	    /* multipathing is set up */
+		unsigned int initialized:1; /* set if initial reference held */
 	} __attribute__((packed)) flags;
 	unsigned long intparm;	/* user interruption parameter */
 	struct qdio_irq *qdio_data;
@@ -115,7 +176,8 @@ struct ccw_device_private {
 	struct senseid senseid;	/* SenseID info */
 	struct pgid pgid[8];	/* path group IDs per chpid*/
 	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
-	struct work_struct kick_work;
+	struct work_struct todo_work;
+	enum cdev_todo todo;
 	wait_queue_head_t wait_q;
 	struct timer_list timer;
 	void *cmb;			/* measurement information */
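
The check and filter hooks declared in struct ccw_request above split result handling in two: filter may reinterpret raw IRB status before it is accounted, and check decides whether the accumulated data is final or the I/O should be started again. Modeled loosely on snsid_check() and nop_filter() from this patch (sketch; data_complete() is a hypothetical helper):

static int my_check(struct ccw_device *cdev, void *data)
{
	/* 0 = final, -EAGAIN = redo the I/O, anything else = final error. */
	return data_complete(cdev) ? 0 : -EAGAIN;
}

static enum io_status my_filter(struct ccw_device *cdev, void *data,
				struct irb *irb, enum io_status status)
{
	/* Example: ignore errors that left no subchannel status behind. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}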
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ff7748a9199d..48aa0647432b 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -182,16 +182,53 @@ struct scssc_area {
 	u32:32;
 } __attribute__ ((packed));
 
+struct qdio_dev_perf_stat {
+	unsigned int adapter_int;
+	unsigned int qdio_int;
+	unsigned int pci_request_int;
+
+	unsigned int tasklet_inbound;
+	unsigned int tasklet_inbound_resched;
+	unsigned int tasklet_inbound_resched2;
+	unsigned int tasklet_outbound;
+
+	unsigned int siga_read;
+	unsigned int siga_write;
+	unsigned int siga_sync;
+
+	unsigned int inbound_call;
+	unsigned int inbound_handler;
+	unsigned int stop_polling;
+	unsigned int inbound_queue_full;
+	unsigned int outbound_call;
+	unsigned int outbound_handler;
+	unsigned int fast_requeue;
+	unsigned int target_full;
+	unsigned int eqbs;
+	unsigned int eqbs_partial;
+	unsigned int sqbs;
+	unsigned int sqbs_partial;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+	/*
+	 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
+	 * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
+	 * aka 127 SBALs found.
+	 */
+	unsigned int nr_sbals[8];
+	unsigned int nr_sbal_error;
+	unsigned int nr_sbal_nop;
+	unsigned int nr_sbal_total;
+};
+
 struct qdio_input_q {
 	/* input buffer acknowledgement flag */
 	int polling;
-
 	/* first ACK'ed buffer */
 	int ack_start;
-
 	/* how much sbals are acknowledged with qebsm */
 	int ack_count;
-
 	/* last time of noticing incoming data */
 	u64 timestamp;
 };
@@ -199,40 +236,27 @@ struct qdio_input_q {
 struct qdio_output_q {
 	/* PCIs are enabled for the queue */
 	int pci_out_enabled;
-
 	/* IQDIO: output multiple buffers (enhanced SIGA) */
 	int use_enh_siga;
-
 	/* timer to check for more outbound work */
 	struct timer_list timer;
 };
 
+/*
+ * Note on cache alignment: grouped slsb and write mostly data at the beginning
+ * sbal[] is read-only and starts on a new cacheline followed by read mostly.
+ */
 struct qdio_q {
 	struct slsb slsb;
+
 	union {
 		struct qdio_input_q in;
 		struct qdio_output_q out;
 	} u;
 
-	/* queue number */
-	int nr;
-
-	/* bitmask of queue number */
-	int mask;
-
-	/* input or output queue */
-	int is_input_q;
-
-	/* list of thinint input queues */
-	struct list_head entry;
-
-	/* upper-layer program handler */
-	qdio_handler_t (*handler);
-
 	/*
 	 * inbound: next buffer the program should check for
-	 * outbound: next buffer to check for having been processed
-	 * by the card
+	 * outbound: next buffer to check if adapter processed it
 	 */
 	int first_to_check;
 
@@ -245,16 +269,32 @@ struct qdio_q {
 	/* number of buffers in use by the adapter */
 	atomic_t nr_buf_used;
 
-	struct qdio_irq *irq_ptr;
-	struct dentry *debugfs_q;
-	struct tasklet_struct tasklet;
-
 	/* error condition during a data transfer */
 	unsigned int qdio_error;
 
-	struct sl *sl;
-	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+	struct tasklet_struct tasklet;
+	struct qdio_queue_perf_stat q_stats;
+
+	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+
+	/* queue number */
+	int nr;
+
+	/* bitmask of queue number */
+	int mask;
+
+	/* input or output queue */
+	int is_input_q;
+
+	/* list of thinint input queues */
+	struct list_head entry;
 
+	/* upper-layer program handler */
+	qdio_handler_t (*handler);
+
+	struct dentry *debugfs_q;
+	struct qdio_irq *irq_ptr;
+	struct sl *sl;
 	/*
 	 * Warning: Leave this member at the end so it won't be cleared in
 	 * qdio_fill_qs. A page is allocated under this pointer and used for
@@ -269,6 +309,7 @@ struct qdio_irq {
269 u32 *dsci; /* address of device state change indicator */ 309 u32 *dsci; /* address of device state change indicator */
270 struct ccw_device *cdev; 310 struct ccw_device *cdev;
271 struct dentry *debugfs_dev; 311 struct dentry *debugfs_dev;
312 struct dentry *debugfs_perf;
272 313
273 unsigned long int_parm; 314 unsigned long int_parm;
274 struct subchannel_id schid; 315 struct subchannel_id schid;
@@ -286,13 +327,10 @@ struct qdio_irq {
286 struct ciw aqueue; 327 struct ciw aqueue;
287 328
288 struct qdio_ssqd_desc ssqd_desc; 329 struct qdio_ssqd_desc ssqd_desc;
289
290 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); 330 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
291 331
292 /* 332 int perf_stat_enabled;
293 * Warning: Leave these members together at the end so they won't be 333
294 * cleared in qdio_setup_irq.
295 */
296 struct qdr *qdr; 334 struct qdr *qdr;
297 unsigned long chsc_page; 335 unsigned long chsc_page;
298 336
@@ -301,6 +339,7 @@ struct qdio_irq {
301 339
302 debug_info_t *debug_area; 340 debug_info_t *debug_area;
303 struct mutex setup_mutex; 341 struct mutex setup_mutex;
342 struct qdio_dev_perf_stat perf_stat;
304}; 343};
305 344
306/* helper functions */ 345/* helper functions */
@@ -311,6 +350,21 @@ struct qdio_irq {
311 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ 350 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
312 css_general_characteristics.aif_osa) 351 css_general_characteristics.aif_osa)
313 352
 353#define qperf(__qdev, __attr)	((__qdev)->perf_stat.__attr)
354
355#define qperf_inc(__q, __attr) \
356({ \
357 struct qdio_irq *qdev = (__q)->irq_ptr; \
358 if (qdev->perf_stat_enabled) \
359 (qdev->perf_stat.__attr)++; \
360})
361
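Note: qperf_inc() is a GNU statement expression, so it can stand anywhere a
void function call could while still short-circuiting when perf_stat_enabled
is off. The increment is a plain ++ on a per-device counter rather than an
atomic operation; compared with the old global atomic_long_t counters an
occasional concurrent increment can be lost, which is presumably acceptable
for statistics. A representative call site from the qdio_main.c hunks below:

	qperf_inc(q, siga_sync);	/* one SIGA sync on q's device */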
362static inline void account_sbals_error(struct qdio_q *q, int count)
363{
364 q->q_stats.nr_sbal_error += count;
365 q->q_stats.nr_sbal_total += count;
366}
367
314/* the highest iqdio queue is used for multicast */ 368/* the highest iqdio queue is used for multicast */
315static inline int multicast_outbound(struct qdio_q *q) 369static inline int multicast_outbound(struct qdio_q *q)
316{ 370{
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 76769978285f..6ce83f56d537 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -33,7 +33,6 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
33 DBF_HEX(&init_data->input_handler, sizeof(void *)); 33 DBF_HEX(&init_data->input_handler, sizeof(void *));
34 DBF_HEX(&init_data->output_handler, sizeof(void *)); 34 DBF_HEX(&init_data->output_handler, sizeof(void *));
35 DBF_HEX(&init_data->int_parm, sizeof(long)); 35 DBF_HEX(&init_data->int_parm, sizeof(long));
36 DBF_HEX(&init_data->flags, sizeof(long));
37 DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *)); 36 DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
38 DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *)); 37 DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
39 DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); 38 DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
@@ -55,14 +54,12 @@ static int qstat_show(struct seq_file *m, void *v)
55 if (!q) 54 if (!q)
56 return 0; 55 return 0;
57 56
58 seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); 57 seq_printf(m, "DSCI: %d nr_used: %d\n",
59 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); 58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
60 seq_printf(m, "ftc: %d\n", q->first_to_check); 59 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
61 seq_printf(m, "last_move: %d\n", q->last_move); 60 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
62 seq_printf(m, "polling: %d\n", q->u.in.polling); 61 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
63 seq_printf(m, "ack start: %d\n", q->u.in.ack_start); 62 seq_printf(m, "SBAL states:\n");
64 seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
65 seq_printf(m, "slsb buffer states:\n");
66 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 63 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
67 64
68 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { 65 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
@@ -99,6 +96,20 @@ static int qstat_show(struct seq_file *m, void *v)
99 } 96 }
100 seq_printf(m, "\n"); 97 seq_printf(m, "\n");
101 seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); 98 seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
99
100 seq_printf(m, "\nSBAL statistics:");
101 if (!q->irq_ptr->perf_stat_enabled) {
102 seq_printf(m, " disabled\n");
103 return 0;
104 }
105
106 seq_printf(m, "\n1 2.. 4.. 8.. "
107 "16.. 32.. 64.. 127\n");
108 for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
109 seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
110 seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
111 q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
112 q->q_stats.nr_sbal_total);
102 return 0; 113 return 0;
103} 114}
104 115
@@ -110,7 +121,6 @@ static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
110 121
111 if (!q) 122 if (!q)
112 return 0; 123 return 0;
113
114 if (q->is_input_q) 124 if (q->is_input_q)
115 xchg(q->irq_ptr->dsci, 1); 125 xchg(q->irq_ptr->dsci, 1);
116 local_bh_disable(); 126 local_bh_disable();
@@ -134,6 +144,103 @@ static const struct file_operations debugfs_fops = {
134 .release = single_release, 144 .release = single_release,
135}; 145};
136 146
147static char *qperf_names[] = {
148 "Assumed adapter interrupts",
149 "QDIO interrupts",
150 "Requested PCIs",
151 "Inbound tasklet runs",
152 "Inbound tasklet resched",
153 "Inbound tasklet resched2",
154 "Outbound tasklet runs",
155 "SIGA read",
156 "SIGA write",
157 "SIGA sync",
158 "Inbound calls",
159 "Inbound handler",
160 "Inbound stop_polling",
161 "Inbound queue full",
162 "Outbound calls",
163 "Outbound handler",
164 "Outbound fast_requeue",
165 "Outbound target_full",
166 "QEBSM eqbs",
167 "QEBSM eqbs partial",
168 "QEBSM sqbs",
169 "QEBSM sqbs partial"
170};
171
172static int qperf_show(struct seq_file *m, void *v)
173{
174 struct qdio_irq *irq_ptr = m->private;
175 unsigned int *stat;
176 int i;
177
178 if (!irq_ptr)
179 return 0;
180 if (!irq_ptr->perf_stat_enabled) {
181 seq_printf(m, "disabled\n");
182 return 0;
183 }
184 stat = (unsigned int *)&irq_ptr->perf_stat;
185
186 for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
187 seq_printf(m, "%26s:\t%u\n",
188 qperf_names[i], *(stat + i));
189 return 0;
190}
191
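Note: qperf_show() walks struct qdio_dev_perf_stat as a flat array of
unsigned int, so qperf_names[] has to stay in lockstep with the member order
declared in qdio.h. A compile-time guard along these lines (an editor's
sketch, not in the patch), placed at the top of qperf_show(), would at least
keep the walk inside the structure; since sizeof the ____cacheline_aligned
struct includes padding, only an upper bound can be checked this way:

	BUILD_BUG_ON(ARRAY_SIZE(qperf_names) * sizeof(unsigned int) >
		     sizeof(struct qdio_dev_perf_stat));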
192static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
193 size_t count, loff_t *off)
194{
195 struct seq_file *seq = file->private_data;
196 struct qdio_irq *irq_ptr = seq->private;
197 struct qdio_q *q;
198 unsigned long val;
199 char buf[8];
200 int ret, i;
201
202 if (!irq_ptr)
203 return 0;
204 if (count >= sizeof(buf))
205 return -EINVAL;
206 if (copy_from_user(&buf, ubuf, count))
207 return -EFAULT;
208 buf[count] = 0;
209
210 ret = strict_strtoul(buf, 10, &val);
211 if (ret < 0)
212 return ret;
213
214 switch (val) {
215 case 0:
216 irq_ptr->perf_stat_enabled = 0;
217 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
218 for_each_input_queue(irq_ptr, q, i)
219 memset(&q->q_stats, 0, sizeof(q->q_stats));
220 for_each_output_queue(irq_ptr, q, i)
221 memset(&q->q_stats, 0, sizeof(q->q_stats));
222 break;
223 case 1:
224 irq_ptr->perf_stat_enabled = 1;
225 break;
226 }
227 return count;
228}
229
230static int qperf_seq_open(struct inode *inode, struct file *filp)
231{
232 return single_open(filp, qperf_show,
233 filp->f_path.dentry->d_inode->i_private);
234}
235
236static struct file_operations debugfs_perf_fops = {
237 .owner = THIS_MODULE,
238 .open = qperf_seq_open,
239 .read = seq_read,
240 .write = qperf_seq_write,
241 .llseek = seq_lseek,
242 .release = single_release,
243};
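Note: with these hooks the counters surface in the existing qdio debugfs
hierarchy. Reading the new "statistics" file prints the qperf_names counters;
the per-queue SBAL buckets remain in the individual queue files via
qstat_show(). Writing 1 enables accounting, writing 0 disables it and clears
both the device and the queue counters (see qperf_seq_write() above).
Assuming debugfs is mounted in the usual place, with a placeholder for the
device directory:

	echo 1 > /sys/kernel/debug/qdio/<device>/statistics
	cat /sys/kernel/debug/qdio/<device>/statistics
	echo 0 > /sys/kernel/debug/qdio/<device>/statistics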
137static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) 244static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
138{ 245{
139 char name[QDIO_DEBUGFS_NAME_LEN]; 246 char name[QDIO_DEBUGFS_NAME_LEN];
@@ -156,6 +263,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
156 debugfs_root); 263 debugfs_root);
157 if (IS_ERR(irq_ptr->debugfs_dev)) 264 if (IS_ERR(irq_ptr->debugfs_dev))
158 irq_ptr->debugfs_dev = NULL; 265 irq_ptr->debugfs_dev = NULL;
266
267 irq_ptr->debugfs_perf = debugfs_create_file("statistics",
268 S_IFREG | S_IRUGO | S_IWUSR,
269 irq_ptr->debugfs_dev, irq_ptr,
270 &debugfs_perf_fops);
271 if (IS_ERR(irq_ptr->debugfs_perf))
272 irq_ptr->debugfs_perf = NULL;
273
159 for_each_input_queue(irq_ptr, q, i) 274 for_each_input_queue(irq_ptr, q, i)
160 setup_debugfs_entry(q, cdev); 275 setup_debugfs_entry(q, cdev);
161 for_each_output_queue(irq_ptr, q, i) 276 for_each_output_queue(irq_ptr, q, i)
@@ -171,6 +286,7 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
171 debugfs_remove(q->debugfs_q); 286 debugfs_remove(q->debugfs_q);
172 for_each_output_queue(irq_ptr, q, i) 287 for_each_output_queue(irq_ptr, q, i)
173 debugfs_remove(q->debugfs_q); 288 debugfs_remove(q->debugfs_q);
289 debugfs_remove(irq_ptr->debugfs_perf);
174 debugfs_remove(irq_ptr->debugfs_dev); 290 debugfs_remove(irq_ptr->debugfs_dev);
175} 291}
176 292
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4be6e84b9599..88be7b9ea6e1 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gfp.h>
16#include <asm/atomic.h> 17#include <asm/atomic.h>
17#include <asm/debug.h> 18#include <asm/debug.h>
18#include <asm/qdio.h> 19#include <asm/qdio.h>
@@ -22,7 +23,6 @@
22#include "device.h" 23#include "device.h"
23#include "qdio.h" 24#include "qdio.h"
24#include "qdio_debug.h" 25#include "qdio_debug.h"
25#include "qdio_perf.h"
26 26
27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ 27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
28 "Jan Glauber <jang@linux.vnet.ibm.com>"); 28 "Jan Glauber <jang@linux.vnet.ibm.com>");
@@ -126,7 +126,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
126 int rc; 126 int rc;
127 127
128 BUG_ON(!q->irq_ptr->sch_token); 128 BUG_ON(!q->irq_ptr->sch_token);
129 qdio_perf_stat_inc(&perf_stats.debug_eqbs_all); 129 qperf_inc(q, eqbs);
130 130
131 if (!q->is_input_q) 131 if (!q->is_input_q)
132 nr += q->irq_ptr->nr_input_qs; 132 nr += q->irq_ptr->nr_input_qs;
@@ -139,7 +139,7 @@ again:
139 * buffers later. 139 * buffers later.
140 */ 140 */
141 if ((ccq == 96) && (count != tmp_count)) { 141 if ((ccq == 96) && (count != tmp_count)) {
142 qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete); 142 qperf_inc(q, eqbs_partial);
143 return (count - tmp_count); 143 return (count - tmp_count);
144 } 144 }
145 145
@@ -182,7 +182,7 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
182 return 0; 182 return 0;
183 183
184 BUG_ON(!q->irq_ptr->sch_token); 184 BUG_ON(!q->irq_ptr->sch_token);
185 qdio_perf_stat_inc(&perf_stats.debug_sqbs_all); 185 qperf_inc(q, sqbs);
186 186
187 if (!q->is_input_q) 187 if (!q->is_input_q)
188 nr += q->irq_ptr->nr_input_qs; 188 nr += q->irq_ptr->nr_input_qs;
@@ -191,7 +191,7 @@ again:
191 rc = qdio_check_ccq(q, ccq); 191 rc = qdio_check_ccq(q, ccq);
192 if (rc == 1) { 192 if (rc == 1) {
193 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); 193 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
194 qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete); 194 qperf_inc(q, sqbs_partial);
195 goto again; 195 goto again;
196 } 196 }
197 if (rc < 0) { 197 if (rc < 0) {
@@ -285,7 +285,7 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
285 return 0; 285 return 0;
286 286
287 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 287 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
288 qdio_perf_stat_inc(&perf_stats.siga_sync); 288 qperf_inc(q, siga_sync);
289 289
290 cc = do_siga_sync(q->irq_ptr->schid, output, input); 290 cc = do_siga_sync(q->irq_ptr->schid, output, input);
291 if (cc) 291 if (cc)
@@ -350,7 +350,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
350 int cc; 350 int cc;
351 351
352 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 352 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
353 qdio_perf_stat_inc(&perf_stats.siga_in); 353 qperf_inc(q, siga_read);
354 354
355 cc = do_siga_input(q->irq_ptr->schid, q->mask); 355 cc = do_siga_input(q->irq_ptr->schid, q->mask);
356 if (cc) 356 if (cc)
@@ -382,7 +382,7 @@ static inline void qdio_stop_polling(struct qdio_q *q)
382 return; 382 return;
383 383
384 q->u.in.polling = 0; 384 q->u.in.polling = 0;
385 qdio_perf_stat_inc(&perf_stats.debug_stop_polling); 385 qperf_inc(q, stop_polling);
386 386
387 /* show the card that we are not polling anymore */ 387 /* show the card that we are not polling anymore */
388 if (is_qebsm(q)) { 388 if (is_qebsm(q)) {
@@ -393,6 +393,20 @@ static inline void qdio_stop_polling(struct qdio_q *q)
393 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); 393 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
394} 394}
395 395
396static inline void account_sbals(struct qdio_q *q, int count)
397{
398 int pos = 0;
399
400 q->q_stats.nr_sbal_total += count;
401 if (count == QDIO_MAX_BUFFERS_MASK) {
402 q->q_stats.nr_sbals[7]++;
403 return;
404 }
405 while (count >>= 1)
406 pos++;
407 q->q_stats.nr_sbals[pos]++;
408}
409
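Note: account_sbals() maps a count to floor(log2(count)) via the shift loop,
except that a full scan of 127 SBALs (QDIO_MAX_BUFFERS_MASK) is diverted to
the last slot, matching the bucket comment on struct qdio_queue_perf_stat.
A few sample values:

	count:  1  2  3  4  7  8  16  63  64  126  127
	index:  0  1  1  2  2  3   4   5   6    6    7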
396static void announce_buffer_error(struct qdio_q *q, int count) 410static void announce_buffer_error(struct qdio_q *q, int count)
397{ 411{
398 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 412 q->qdio_error |= QDIO_ERROR_SLSB_STATE;
@@ -400,7 +414,7 @@ static void announce_buffer_error(struct qdio_q *q, int count)
400 /* special handling for no target buffer empty */ 414 /* special handling for no target buffer empty */
401 if ((!q->is_input_q && 415 if ((!q->is_input_q &&
402 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { 416 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
403 qdio_perf_stat_inc(&perf_stats.outbound_target_full); 417 qperf_inc(q, target_full);
404 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 418 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
405 q->first_to_check); 419 q->first_to_check);
406 return; 420 return;
@@ -486,17 +500,24 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
486 case SLSB_P_INPUT_PRIMED: 500 case SLSB_P_INPUT_PRIMED:
487 inbound_primed(q, count); 501 inbound_primed(q, count);
488 q->first_to_check = add_buf(q->first_to_check, count); 502 q->first_to_check = add_buf(q->first_to_check, count);
489 atomic_sub(count, &q->nr_buf_used); 503 if (atomic_sub(count, &q->nr_buf_used) == 0)
504 qperf_inc(q, inbound_queue_full);
505 if (q->irq_ptr->perf_stat_enabled)
506 account_sbals(q, count);
490 break; 507 break;
491 case SLSB_P_INPUT_ERROR: 508 case SLSB_P_INPUT_ERROR:
492 announce_buffer_error(q, count); 509 announce_buffer_error(q, count);
493 /* process the buffer, the upper layer will take care of it */ 510 /* process the buffer, the upper layer will take care of it */
494 q->first_to_check = add_buf(q->first_to_check, count); 511 q->first_to_check = add_buf(q->first_to_check, count);
495 atomic_sub(count, &q->nr_buf_used); 512 atomic_sub(count, &q->nr_buf_used);
513 if (q->irq_ptr->perf_stat_enabled)
514 account_sbals_error(q, count);
496 break; 515 break;
497 case SLSB_CU_INPUT_EMPTY: 516 case SLSB_CU_INPUT_EMPTY:
498 case SLSB_P_INPUT_NOT_INIT: 517 case SLSB_P_INPUT_NOT_INIT:
499 case SLSB_P_INPUT_ACK: 518 case SLSB_P_INPUT_ACK:
519 if (q->irq_ptr->perf_stat_enabled)
520 q->q_stats.nr_sbal_nop++;
500 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); 521 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
501 break; 522 break;
502 default: 523 default:
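Note: testing the result of atomic_sub() appears to rely on the s390
definition, where atomic_sub() is a macro around atomic_sub_return() and
yields the new value; the generic atomic_sub() returns void, so this idiom
would not build on other architectures. Harmless in an s390-only driver, but
worth knowing when reading the hunk above.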
@@ -514,7 +535,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
514 535
515 if ((bufnr != q->last_move) || q->qdio_error) { 536 if ((bufnr != q->last_move) || q->qdio_error) {
516 q->last_move = bufnr; 537 q->last_move = bufnr;
517 if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) 538 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
518 q->u.in.timestamp = get_usecs(); 539 q->u.in.timestamp = get_usecs();
519 return 1; 540 return 1;
520 } else 541 } else
@@ -531,7 +552,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
531 qdio_siga_sync_q(q); 552 qdio_siga_sync_q(q);
532 get_buf_state(q, q->first_to_check, &state, 0); 553 get_buf_state(q, q->first_to_check, &state, 0);
533 554
534 if (state == SLSB_P_INPUT_PRIMED) 555 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
535 /* more work coming */ 556 /* more work coming */
536 return 0; 557 return 0;
537 558
@@ -566,11 +587,13 @@ static void qdio_kick_handler(struct qdio_q *q)
566 count = sub_buf(end, start); 587 count = sub_buf(end, start);
567 588
568 if (q->is_input_q) { 589 if (q->is_input_q) {
569 qdio_perf_stat_inc(&perf_stats.inbound_handler); 590 qperf_inc(q, inbound_handler);
570 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); 591 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
571 } else 592 } else {
593 qperf_inc(q, outbound_handler);
572 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", 594 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
573 start, count); 595 start, count);
596 }
574 597
575 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, 598 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
576 q->irq_ptr->int_parm); 599 q->irq_ptr->int_parm);
@@ -582,24 +605,28 @@ static void qdio_kick_handler(struct qdio_q *q)
582 605
583static void __qdio_inbound_processing(struct qdio_q *q) 606static void __qdio_inbound_processing(struct qdio_q *q)
584{ 607{
585 qdio_perf_stat_inc(&perf_stats.tasklet_inbound); 608 qperf_inc(q, tasklet_inbound);
586again: 609again:
587 if (!qdio_inbound_q_moved(q)) 610 if (!qdio_inbound_q_moved(q))
588 return; 611 return;
589 612
590 qdio_kick_handler(q); 613 qdio_kick_handler(q);
591 614
592 if (!qdio_inbound_q_done(q)) 615 if (!qdio_inbound_q_done(q)) {
593 /* means poll time is not yet over */ 616 /* means poll time is not yet over */
617 qperf_inc(q, tasklet_inbound_resched);
594 goto again; 618 goto again;
619 }
595 620
596 qdio_stop_polling(q); 621 qdio_stop_polling(q);
597 /* 622 /*
598 * We need to check again to not lose initiative after 623 * We need to check again to not lose initiative after
599 * resetting the ACK state. 624 * resetting the ACK state.
600 */ 625 */
601 if (!qdio_inbound_q_done(q)) 626 if (!qdio_inbound_q_done(q)) {
627 qperf_inc(q, tasklet_inbound_resched2);
602 goto again; 628 goto again;
629 }
603} 630}
604 631
605void qdio_inbound_processing(unsigned long data) 632void qdio_inbound_processing(unsigned long data)
@@ -638,15 +665,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
638 665
639 atomic_sub(count, &q->nr_buf_used); 666 atomic_sub(count, &q->nr_buf_used);
640 q->first_to_check = add_buf(q->first_to_check, count); 667 q->first_to_check = add_buf(q->first_to_check, count);
668 if (q->irq_ptr->perf_stat_enabled)
669 account_sbals(q, count);
641 break; 670 break;
642 case SLSB_P_OUTPUT_ERROR: 671 case SLSB_P_OUTPUT_ERROR:
643 announce_buffer_error(q, count); 672 announce_buffer_error(q, count);
644 /* process the buffer, the upper layer will take care of it */ 673 /* process the buffer, the upper layer will take care of it */
645 q->first_to_check = add_buf(q->first_to_check, count); 674 q->first_to_check = add_buf(q->first_to_check, count);
646 atomic_sub(count, &q->nr_buf_used); 675 atomic_sub(count, &q->nr_buf_used);
676 if (q->irq_ptr->perf_stat_enabled)
677 account_sbals_error(q, count);
647 break; 678 break;
648 case SLSB_CU_OUTPUT_PRIMED: 679 case SLSB_CU_OUTPUT_PRIMED:
649 /* the adapter has not fetched the output yet */ 680 /* the adapter has not fetched the output yet */
681 if (q->irq_ptr->perf_stat_enabled)
682 q->q_stats.nr_sbal_nop++;
650 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); 683 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
651 break; 684 break;
652 case SLSB_P_OUTPUT_NOT_INIT: 685 case SLSB_P_OUTPUT_NOT_INIT:
@@ -687,7 +720,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
687 return 0; 720 return 0;
688 721
689 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); 722 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
690 qdio_perf_stat_inc(&perf_stats.siga_out); 723 qperf_inc(q, siga_write);
691 724
692 cc = qdio_siga_output(q, &busy_bit); 725 cc = qdio_siga_output(q, &busy_bit);
693 switch (cc) { 726 switch (cc) {
@@ -710,7 +743,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
710 743
711static void __qdio_outbound_processing(struct qdio_q *q) 744static void __qdio_outbound_processing(struct qdio_q *q)
712{ 745{
713 qdio_perf_stat_inc(&perf_stats.tasklet_outbound); 746 qperf_inc(q, tasklet_outbound);
714 BUG_ON(atomic_read(&q->nr_buf_used) < 0); 747 BUG_ON(atomic_read(&q->nr_buf_used) < 0);
715 748
716 if (qdio_outbound_q_moved(q)) 749 if (qdio_outbound_q_moved(q))
@@ -738,12 +771,9 @@ static void __qdio_outbound_processing(struct qdio_q *q)
738 */ 771 */
739 if (qdio_outbound_q_done(q)) 772 if (qdio_outbound_q_done(q))
740 del_timer(&q->u.out.timer); 773 del_timer(&q->u.out.timer);
741 else { 774 else
742 if (!timer_pending(&q->u.out.timer)) { 775 if (!timer_pending(&q->u.out.timer))
743 mod_timer(&q->u.out.timer, jiffies + 10 * HZ); 776 mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
744 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
745 }
746 }
747 return; 777 return;
748 778
749sched: 779sched:
@@ -783,7 +813,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
783 813
784static void __tiqdio_inbound_processing(struct qdio_q *q) 814static void __tiqdio_inbound_processing(struct qdio_q *q)
785{ 815{
786 qdio_perf_stat_inc(&perf_stats.thinint_inbound); 816 qperf_inc(q, tasklet_inbound);
787 qdio_sync_after_thinint(q); 817 qdio_sync_after_thinint(q);
788 818
789 /* 819 /*
@@ -798,7 +828,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
798 qdio_kick_handler(q); 828 qdio_kick_handler(q);
799 829
800 if (!qdio_inbound_q_done(q)) { 830 if (!qdio_inbound_q_done(q)) {
801 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); 831 qperf_inc(q, tasklet_inbound_resched);
802 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { 832 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
803 tasklet_schedule(&q->tasklet); 833 tasklet_schedule(&q->tasklet);
804 return; 834 return;
@@ -811,7 +841,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
811 * resetting the ACK state. 841 * resetting the ACK state.
812 */ 842 */
813 if (!qdio_inbound_q_done(q)) { 843 if (!qdio_inbound_q_done(q)) {
814 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); 844 qperf_inc(q, tasklet_inbound_resched2);
815 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) 845 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
816 tasklet_schedule(&q->tasklet); 846 tasklet_schedule(&q->tasklet);
817 } 847 }
@@ -850,8 +880,6 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
850 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 880 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
851 return; 881 return;
852 882
853 qdio_perf_stat_inc(&perf_stats.pci_int);
854
855 for_each_input_queue(irq_ptr, q, i) 883 for_each_input_queue(irq_ptr, q, i)
856 tasklet_schedule(&q->tasklet); 884 tasklet_schedule(&q->tasklet);
857 885
@@ -922,8 +950,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
922 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 950 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
923 int cstat, dstat; 951 int cstat, dstat;
924 952
925 qdio_perf_stat_inc(&perf_stats.qdio_int);
926
927 if (!intparm || !irq_ptr) { 953 if (!intparm || !irq_ptr) {
928 DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); 954 DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
929 return; 955 return;
@@ -962,6 +988,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
962 qdio_handle_activate_check(cdev, intparm, cstat, 988 qdio_handle_activate_check(cdev, intparm, cstat,
963 dstat); 989 dstat);
964 break; 990 break;
991 case QDIO_IRQ_STATE_STOPPED:
992 break;
965 default: 993 default:
966 WARN_ON(1); 994 WARN_ON(1);
967 } 995 }
@@ -1382,6 +1410,8 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1382{ 1410{
1383 int used, diff; 1411 int used, diff;
1384 1412
1413 qperf_inc(q, inbound_call);
1414
1385 if (!q->u.in.polling) 1415 if (!q->u.in.polling)
1386 goto set; 1416 goto set;
1387 1417
@@ -1437,14 +1467,16 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1437 unsigned char state; 1467 unsigned char state;
1438 int used, rc = 0; 1468 int used, rc = 0;
1439 1469
1440 qdio_perf_stat_inc(&perf_stats.outbound_handler); 1470 qperf_inc(q, outbound_call);
1441 1471
1442 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); 1472 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1443 used = atomic_add_return(count, &q->nr_buf_used); 1473 used = atomic_add_return(count, &q->nr_buf_used);
1444 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); 1474 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1445 1475
1446 if (callflags & QDIO_FLAG_PCI_OUT) 1476 if (callflags & QDIO_FLAG_PCI_OUT) {
1447 q->u.out.pci_out_enabled = 1; 1477 q->u.out.pci_out_enabled = 1;
1478 qperf_inc(q, pci_request_int);
1479 }
1448 else 1480 else
1449 q->u.out.pci_out_enabled = 0; 1481 q->u.out.pci_out_enabled = 0;
1450 1482
@@ -1483,7 +1515,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1483 if (state != SLSB_CU_OUTPUT_PRIMED) 1515 if (state != SLSB_CU_OUTPUT_PRIMED)
1484 rc = qdio_kick_outbound_q(q); 1516 rc = qdio_kick_outbound_q(q);
1485 else 1517 else
1486 qdio_perf_stat_inc(&perf_stats.fast_requeue); 1518 qperf_inc(q, fast_requeue);
1487 1519
1488out: 1520out:
1489 tasklet_schedule(&q->tasklet); 1521 tasklet_schedule(&q->tasklet);
@@ -1539,16 +1571,11 @@ static int __init init_QDIO(void)
1539 rc = qdio_debug_init(); 1571 rc = qdio_debug_init();
1540 if (rc) 1572 if (rc)
1541 goto out_ti; 1573 goto out_ti;
1542 rc = qdio_setup_perf_stats();
1543 if (rc)
1544 goto out_debug;
1545 rc = tiqdio_register_thinints(); 1574 rc = tiqdio_register_thinints();
1546 if (rc) 1575 if (rc)
1547 goto out_perf; 1576 goto out_debug;
1548 return 0; 1577 return 0;
1549 1578
1550out_perf:
1551 qdio_remove_perf_stats();
1552out_debug: 1579out_debug:
1553 qdio_debug_exit(); 1580 qdio_debug_exit();
1554out_ti: 1581out_ti:
@@ -1562,7 +1589,6 @@ static void __exit exit_QDIO(void)
1562{ 1589{
1563 tiqdio_unregister_thinints(); 1590 tiqdio_unregister_thinints();
1564 tiqdio_free_memory(); 1591 tiqdio_free_memory();
1565 qdio_remove_perf_stats();
1566 qdio_debug_exit(); 1592 qdio_debug_exit();
1567 qdio_setup_exit(); 1593 qdio_setup_exit();
1568} 1594}
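Note: the qdio_main.c hunks above follow one mechanical rule: every
qdio_perf_stat_inc() on a global counter becomes a qperf_inc() on the device
owning the queue, with a few counters renamed along the way. In miniature,
taken from the SIGA write path:

	/* before: one global, atomically updated counter per host */
	qdio_perf_stat_inc(&perf_stats.siga_out);

	/* after: per-device counter, gated on perf_stat_enabled */
	qperf_inc(q, siga_write);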
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
deleted file mode 100644
index 968e3c7c2632..000000000000
--- a/drivers/s390/cio/qdio_perf.c
+++ /dev/null
@@ -1,147 +0,0 @@
1/*
2 * drivers/s390/cio/qdio_perf.c
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#include <linux/kernel.h>
9#include <linux/proc_fs.h>
10#include <linux/seq_file.h>
11#include <asm/ccwdev.h>
12
13#include "cio.h"
14#include "css.h"
15#include "device.h"
16#include "ioasm.h"
17#include "chsc.h"
18#include "qdio_debug.h"
19#include "qdio_perf.h"
20
21int qdio_performance_stats;
22struct qdio_perf_stats perf_stats;
23
24#ifdef CONFIG_PROC_FS
25static struct proc_dir_entry *qdio_perf_pde;
26#endif
27
28/*
29 * procfs functions
30 */
31static int qdio_perf_proc_show(struct seq_file *m, void *v)
32{
33 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
34 (long)atomic_long_read(&perf_stats.qdio_int));
35 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36 (long)atomic_long_read(&perf_stats.pci_int));
37 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
38 (long)atomic_long_read(&perf_stats.thin_int));
39 seq_printf(m, "\n");
40 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
41 (long)atomic_long_read(&perf_stats.tasklet_inbound));
42 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
43 (long)atomic_long_read(&perf_stats.tasklet_outbound));
44 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
45 (long)atomic_long_read(&perf_stats.tasklet_thinint),
46 (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
47 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
48 (long)atomic_long_read(&perf_stats.thinint_inbound),
49 (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
50 seq_printf(m, "\n");
51 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
52 (long)atomic_long_read(&perf_stats.siga_in));
53 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
54 (long)atomic_long_read(&perf_stats.siga_out));
55 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
56 (long)atomic_long_read(&perf_stats.siga_sync));
57 seq_printf(m, "\n");
58 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
59 (long)atomic_long_read(&perf_stats.inbound_handler));
60 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
61 (long)atomic_long_read(&perf_stats.outbound_handler));
62 seq_printf(m, "\n");
63 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
64 (long)atomic_long_read(&perf_stats.fast_requeue));
65 seq_printf(m, "Number of outbound target full condition\t: %li\n",
66 (long)atomic_long_read(&perf_stats.outbound_target_full));
67 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
68 (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
69 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
70 (long)atomic_long_read(&perf_stats.debug_stop_polling));
71 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
72 (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
73 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
74 (long)atomic_long_read(&perf_stats.debug_eqbs_all),
75 (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
76 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
77 (long)atomic_long_read(&perf_stats.debug_sqbs_all),
78 (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
79 seq_printf(m, "\n");
80 return 0;
81}
82static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
83{
84 return single_open(filp, qdio_perf_proc_show, NULL);
85}
86
87static const struct file_operations qdio_perf_proc_fops = {
88 .owner = THIS_MODULE,
89 .open = qdio_perf_seq_open,
90 .read = seq_read,
91 .llseek = seq_lseek,
92 .release = single_release,
93};
94
95/*
96 * sysfs functions
97 */
98static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
99{
100 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
101}
102
103static ssize_t qdio_perf_stats_store(struct bus_type *bus,
104 const char *buf, size_t count)
105{
106 unsigned long i;
107
108 if (strict_strtoul(buf, 16, &i) != 0)
109 return -EINVAL;
110 if ((i != 0) && (i != 1))
111 return -EINVAL;
112 if (i == qdio_performance_stats)
113 return count;
114
115 qdio_performance_stats = i;
116 /* reset performance statistics */
117 if (i == 0)
118 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
119 return count;
120}
121
122static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
123 qdio_perf_stats_store);
124
125int __init qdio_setup_perf_stats(void)
126{
127 int rc;
128
129 rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
130 if (rc)
131 return rc;
132
133#ifdef CONFIG_PROC_FS
134 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
135 qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
136 NULL, &qdio_perf_proc_fops);
137#endif
138 return 0;
139}
140
141void qdio_remove_perf_stats(void)
142{
143#ifdef CONFIG_PROC_FS
144 remove_proc_entry("qdio_perf", NULL);
145#endif
146 bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
147}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
deleted file mode 100644
index ff4504ce1e3c..000000000000
--- a/drivers/s390/cio/qdio_perf.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * drivers/s390/cio/qdio_perf.h
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#ifndef QDIO_PERF_H
9#define QDIO_PERF_H
10
11#include <linux/types.h>
12#include <asm/atomic.h>
13
14struct qdio_perf_stats {
15 /* interrupt handler calls */
16 atomic_long_t qdio_int;
17 atomic_long_t pci_int;
18 atomic_long_t thin_int;
19
20 /* tasklet runs */
21 atomic_long_t tasklet_inbound;
22 atomic_long_t tasklet_outbound;
23 atomic_long_t tasklet_thinint;
24 atomic_long_t tasklet_thinint_loop;
25 atomic_long_t thinint_inbound;
26 atomic_long_t thinint_inbound_loop;
27 atomic_long_t thinint_inbound_loop2;
28
29 /* signal adapter calls */
30 atomic_long_t siga_out;
31 atomic_long_t siga_in;
32 atomic_long_t siga_sync;
33
34 /* misc */
35 atomic_long_t inbound_handler;
36 atomic_long_t outbound_handler;
37 atomic_long_t fast_requeue;
38 atomic_long_t outbound_target_full;
39
40 /* for debugging */
41 atomic_long_t debug_tl_out_timer;
42 atomic_long_t debug_stop_polling;
43 atomic_long_t debug_eqbs_all;
44 atomic_long_t debug_eqbs_incomplete;
45 atomic_long_t debug_sqbs_all;
46 atomic_long_t debug_sqbs_incomplete;
47};
48
49extern struct qdio_perf_stats perf_stats;
50extern int qdio_performance_stats;
51
52static inline void qdio_perf_stat_inc(atomic_long_t *count)
53{
54 if (qdio_performance_stats)
55 atomic_long_inc(count);
56}
57
58int qdio_setup_perf_stats(void);
59void qdio_remove_perf_stats(void);
60
61#endif
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 18d54fc21ce9..7f4a75465140 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -48,7 +48,6 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
48 if (!irq_ptr) 48 if (!irq_ptr)
49 return; 49 return;
50 50
51 WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
52 irq_ptr->qib.pfmt = qib_param_field_format; 51 irq_ptr->qib.pfmt = qib_param_field_format;
53 if (qib_param_field) 52 if (qib_param_field)
54 memcpy(irq_ptr->qib.parm, qib_param_field, 53 memcpy(irq_ptr->qib.parm, qib_param_field,
@@ -82,14 +81,12 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
82 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 81 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
83 if (!q) 82 if (!q)
84 return -ENOMEM; 83 return -ENOMEM;
85 WARN_ON((unsigned long)q & 0xff);
86 84
87 q->slib = (struct slib *) __get_free_page(GFP_KERNEL); 85 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
88 if (!q->slib) { 86 if (!q->slib) {
89 kmem_cache_free(qdio_q_cache, q); 87 kmem_cache_free(qdio_q_cache, q);
90 return -ENOMEM; 88 return -ENOMEM;
91 } 89 }
92 WARN_ON((unsigned long)q->slib & 0x7ff);
93 irq_ptr_qs[i] = q; 90 irq_ptr_qs[i] = q;
94 } 91 }
95 return 0; 92 return 0;
@@ -131,7 +128,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
131 /* fill in sbal */ 128 /* fill in sbal */
132 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { 129 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
133 q->sbal[j] = *sbals_array++; 130 q->sbal[j] = *sbals_array++;
134 WARN_ON((unsigned long)q->sbal[j] & 0xff); 131 BUG_ON((unsigned long)q->sbal[j] & 0xff);
135 } 132 }
136 133
137 /* fill in slib */ 134 /* fill in slib */
@@ -147,11 +144,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
147 /* fill in sl */ 144 /* fill in sl */
148 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) 145 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
149 q->sl->element[j].sbal = (unsigned long)q->sbal[j]; 146 q->sl->element[j].sbal = (unsigned long)q->sbal[j];
150
151 DBF_EVENT("sl-slsb-sbal");
152 DBF_HEX(q->sl, sizeof(void *));
153 DBF_HEX(&q->slsb, sizeof(void *));
154 DBF_HEX(q->sbal, sizeof(void *));
155} 147}
156 148
157static void setup_queues(struct qdio_irq *irq_ptr, 149static void setup_queues(struct qdio_irq *irq_ptr,
@@ -341,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
341 irq_ptr->qdr->qdf0[i + nr].slsba = 333 irq_ptr->qdr->qdf0[i + nr].slsba =
342 (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; 334 (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
343 335
344 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; 336 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
345 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; 337 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
346 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; 338 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
347 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; 339 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
348} 340}
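Note on the >> 4 shifts: PAGE_DEFAULT_KEY carries the storage access key in
the high nibble of a byte, while the qdr/qdesfmt0 key fields are only four
bits wide, so assigning the unshifted value would truncate to 0 and set the
wrong key whenever PAGE_DEFAULT_ACC is non-zero. For reference, from
arch/s390/include/asm/page.h:

	#define PAGE_DEFAULT_ACC	0
	#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

The same correction is applied to the scssc_area keys in qdio_thinint.c
below.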
349 341
350static void setup_qdr(struct qdio_irq *irq_ptr, 342static void setup_qdr(struct qdio_irq *irq_ptr,
@@ -358,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
358 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ 350 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
359 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; 351 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
360 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; 352 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
361 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; 353 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
362 354
363 for (i = 0; i < qdio_init->no_input_qs; i++) 355 for (i = 0; i < qdio_init->no_input_qs; i++)
364 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); 356 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
@@ -390,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
390 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 382 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
391 int rc; 383 int rc;
392 384
393 memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); 385 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
386 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
387 memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
388 memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
389 memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
390
391 irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
392 irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
393
394 /* wipes qib.ac, required by ar7063 */ 394 /* wipes qib.ac, required by ar7063 */
395 memset(irq_ptr->qdr, 0, sizeof(struct qdr)); 395 memset(irq_ptr->qdr, 0, sizeof(struct qdr));
396 396
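Note: the old initialization zeroed everything from the start of struct
qdio_irq up to the qdr member in one sweep:

	/* before: correctness depended on member order in struct qdio_irq */
	memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));

which is why the comment removed from qdio.h above warned to keep qdr and
chsc_page at the end. Clearing the live members explicitly drops that hidden
layout constraint, so the new perf_stat member and the debugfs handles can
sit anywhere in the structure.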
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 981a77ea7ee2..ce5f8910ff83 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,14 +1,13 @@
1/* 1/*
2 * linux/drivers/s390/cio/thinint_qdio.c 2 * linux/drivers/s390/cio/thinint_qdio.c
3 * 3 *
4 * thin interrupt support for qdio 4 * Copyright 2000,2009 IBM Corp.
5 *
6 * Copyright 2000-2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Cornelia Huck <cornelia.huck@de.ibm.com> 6 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 * Jan Glauber <jang@linux.vnet.ibm.com> 7 * Jan Glauber <jang@linux.vnet.ibm.com>
10 */ 8 */
11#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/slab.h>
12#include <asm/atomic.h> 11#include <asm/atomic.h>
13#include <asm/debug.h> 12#include <asm/debug.h>
14#include <asm/qdio.h> 13#include <asm/qdio.h>
@@ -19,7 +18,6 @@
19#include "ioasm.h" 18#include "ioasm.h"
20#include "qdio.h" 19#include "qdio.h"
21#include "qdio_debug.h" 20#include "qdio_debug.h"
22#include "qdio_perf.h"
23 21
24/* 22/*
25 * Restriction: only 63 iqdio subchannels would have its own indicator, 23 * Restriction: only 63 iqdio subchannels would have its own indicator,
@@ -132,8 +130,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
132{ 130{
133 struct qdio_q *q; 131 struct qdio_q *q;
134 132
135 qdio_perf_stat_inc(&perf_stats.thin_int);
136
137 /* 133 /*
138 * SVS only when needed: issue SVS to benefit from iqdio interrupt 134 * SVS only when needed: issue SVS to benefit from iqdio interrupt
139 * avoidance (SVS clears adapter interrupt suppression overwrite) 135 * avoidance (SVS clears adapter interrupt suppression overwrite)
@@ -154,6 +150,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
154 list_for_each_entry_rcu(q, &tiq_list, entry) 150 list_for_each_entry_rcu(q, &tiq_list, entry)
155 /* only process queues from changed sets */ 151 /* only process queues from changed sets */
156 if (*q->irq_ptr->dsci) { 152 if (*q->irq_ptr->dsci) {
153 qperf_inc(q, adapter_int);
157 154
158 /* only clear it if the indicator is non-shared */ 155 /* only clear it if the indicator is non-shared */
159 if (!shared_ind(q->irq_ptr)) 156 if (!shared_ind(q->irq_ptr))
@@ -202,8 +199,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
202 .code = 0x0021, 199 .code = 0x0021,
203 }; 200 };
204 scssc_area->operation_code = 0; 201 scssc_area->operation_code = 0;
205 scssc_area->ks = PAGE_DEFAULT_KEY; 202 scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
206 scssc_area->kc = PAGE_DEFAULT_KEY; 203 scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
207 scssc_area->isc = QDIO_AIRQ_ISC; 204 scssc_area->isc = QDIO_AIRQ_ISC;
208 scssc_area->schid = irq_ptr->schid; 205 scssc_area->schid = irq_ptr->schid;
209 206
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1294876bf7b4..91c6028d7b74 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -33,6 +33,7 @@
33#include <linux/err.h> 33#include <linux/err.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <linux/slab.h>
36#include <linux/notifier.h> 37#include <linux/notifier.h>
37#include <linux/kthread.h> 38#include <linux/kthread.h>
38#include <linux/mutex.h> 39#include <linux/mutex.h>
@@ -102,6 +103,7 @@ static atomic_t ap_poll_requests = ATOMIC_INIT(0);
102static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); 103static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
103static struct task_struct *ap_poll_kthread = NULL; 104static struct task_struct *ap_poll_kthread = NULL;
104static DEFINE_MUTEX(ap_poll_thread_mutex); 105static DEFINE_MUTEX(ap_poll_thread_mutex);
106static DEFINE_SPINLOCK(ap_poll_timer_lock);
105static void *ap_interrupt_indicator; 107static void *ap_interrupt_indicator;
106static struct hrtimer ap_poll_timer; 108static struct hrtimer ap_poll_timer;
107/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. 109/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
@@ -282,6 +284,7 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
282 * @psmid: The program supplied message identifier 284 * @psmid: The program supplied message identifier
283 * @msg: The message text 285 * @msg: The message text
284 * @length: The message length 286 * @length: The message length
287 * @special: Special Bit
285 * 288 *
286 * Returns AP queue status structure. 289 * Returns AP queue status structure.
287 * Condition code 1 on NQAP can't happen because the L bit is 1. 290 * Condition code 1 on NQAP can't happen because the L bit is 1.
@@ -289,7 +292,8 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
289 * because a segment boundary was reached. The NQAP is repeated. 292 * because a segment boundary was reached. The NQAP is repeated.
290 */ 293 */
291static inline struct ap_queue_status 294static inline struct ap_queue_status
292__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) 295__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
296 unsigned int special)
293{ 297{
294 typedef struct { char _[length]; } msgblock; 298 typedef struct { char _[length]; } msgblock;
295 register unsigned long reg0 asm ("0") = qid | 0x40000000UL; 299 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
@@ -299,6 +303,9 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
299 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); 303 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
300 register unsigned long reg5 asm ("5") = (unsigned int) psmid; 304 register unsigned long reg5 asm ("5") = (unsigned int) psmid;
301 305
306 if (special == 1)
307 reg0 |= 0x400000UL;
308
302 asm volatile ( 309 asm volatile (
303 "0: .long 0xb2ad0042\n" /* DQAP */ 310 "0: .long 0xb2ad0042\n" /* DQAP */
304 " brc 2,0b" 311 " brc 2,0b"
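Note: when the new special argument is set, __ap_send() ors 0x400000 into
general register 0 next to the queue id before the instruction is issued,
flagging the request for the adapter's special-command processing. Judging by
the new error cases below, adapters lacking that facility answer with
response code 0x16 (AP_RESPONSE_REQ_FAC_NOT_INST), which the callers
translate to -EINVAL.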
@@ -312,13 +319,15 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
312{ 319{
313 struct ap_queue_status status; 320 struct ap_queue_status status;
314 321
315 status = __ap_send(qid, psmid, msg, length); 322 status = __ap_send(qid, psmid, msg, length, 0);
316 switch (status.response_code) { 323 switch (status.response_code) {
317 case AP_RESPONSE_NORMAL: 324 case AP_RESPONSE_NORMAL:
318 return 0; 325 return 0;
319 case AP_RESPONSE_Q_FULL: 326 case AP_RESPONSE_Q_FULL:
320 case AP_RESPONSE_RESET_IN_PROGRESS: 327 case AP_RESPONSE_RESET_IN_PROGRESS:
321 return -EBUSY; 328 return -EBUSY;
329 case AP_RESPONSE_REQ_FAC_NOT_INST:
330 return -EINVAL;
322 default: /* Device is gone. */ 331 default: /* Device is gone. */
323 return -ENODEV; 332 return -ENODEV;
324 } 333 }
@@ -1008,7 +1017,7 @@ static int ap_probe_device_type(struct ap_device *ap_dev)
1008 } 1017 }
1009 1018
1010 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL, 1019 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
1011 msg, sizeof(msg)); 1020 msg, sizeof(msg), 0);
1012 if (status.response_code != AP_RESPONSE_NORMAL) { 1021 if (status.response_code != AP_RESPONSE_NORMAL) {
1013 rc = -ENODEV; 1022 rc = -ENODEV;
1014 goto out_free; 1023 goto out_free;
@@ -1163,16 +1172,19 @@ ap_config_timeout(unsigned long ptr)
1163static inline void ap_schedule_poll_timer(void) 1172static inline void ap_schedule_poll_timer(void)
1164{ 1173{
1165 ktime_t hr_time; 1174 ktime_t hr_time;
1175
1176 spin_lock_bh(&ap_poll_timer_lock);
1166 if (ap_using_interrupts() || ap_suspend_flag) 1177 if (ap_using_interrupts() || ap_suspend_flag)
1167 return; 1178 goto out;
1168 if (hrtimer_is_queued(&ap_poll_timer)) 1179 if (hrtimer_is_queued(&ap_poll_timer))
1169 return; 1180 goto out;
1170 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { 1181 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1171 hr_time = ktime_set(0, poll_timeout); 1182 hr_time = ktime_set(0, poll_timeout);
1172 hrtimer_forward_now(&ap_poll_timer, hr_time); 1183 hrtimer_forward_now(&ap_poll_timer, hr_time);
1173 hrtimer_restart(&ap_poll_timer); 1184 hrtimer_restart(&ap_poll_timer);
1174 } 1185 }
1175 return; 1186out:
1187 spin_unlock_bh(&ap_poll_timer_lock);
1176} 1188}
1177 1189
1178/** 1190/**
@@ -1243,7 +1255,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1243 /* Start the next request on the queue. */ 1255 /* Start the next request on the queue. */
1244 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); 1256 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
1245 status = __ap_send(ap_dev->qid, ap_msg->psmid, 1257 status = __ap_send(ap_dev->qid, ap_msg->psmid,
1246 ap_msg->message, ap_msg->length); 1258 ap_msg->message, ap_msg->length, ap_msg->special);
1247 switch (status.response_code) { 1259 switch (status.response_code) {
1248 case AP_RESPONSE_NORMAL: 1260 case AP_RESPONSE_NORMAL:
1249 atomic_inc(&ap_poll_requests); 1261 atomic_inc(&ap_poll_requests);
@@ -1261,6 +1273,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1261 *flags |= 2; 1273 *flags |= 2;
1262 break; 1274 break;
1263 case AP_RESPONSE_MESSAGE_TOO_BIG: 1275 case AP_RESPONSE_MESSAGE_TOO_BIG:
1276 case AP_RESPONSE_REQ_FAC_NOT_INST:
1264 return -EINVAL; 1277 return -EINVAL;
1265 default: 1278 default:
1266 return -ENODEV; 1279 return -ENODEV;
@@ -1302,7 +1315,8 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
1302 if (list_empty(&ap_dev->requestq) && 1315 if (list_empty(&ap_dev->requestq) &&
1303 ap_dev->queue_count < ap_dev->queue_depth) { 1316 ap_dev->queue_count < ap_dev->queue_depth) {
1304 status = __ap_send(ap_dev->qid, ap_msg->psmid, 1317 status = __ap_send(ap_dev->qid, ap_msg->psmid,
1305 ap_msg->message, ap_msg->length); 1318 ap_msg->message, ap_msg->length,
1319 ap_msg->special);
1306 switch (status.response_code) { 1320 switch (status.response_code) {
1307 case AP_RESPONSE_NORMAL: 1321 case AP_RESPONSE_NORMAL:
1308 list_add_tail(&ap_msg->list, &ap_dev->pendingq); 1322 list_add_tail(&ap_msg->list, &ap_dev->pendingq);
@@ -1317,6 +1331,7 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
1317 ap_dev->requestq_count++; 1331 ap_dev->requestq_count++;
1318 ap_dev->total_request_count++; 1332 ap_dev->total_request_count++;
1319 return -EBUSY; 1333 return -EBUSY;
1334 case AP_RESPONSE_REQ_FAC_NOT_INST:
1320 case AP_RESPONSE_MESSAGE_TOO_BIG: 1335 case AP_RESPONSE_MESSAGE_TOO_BIG:
1321 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL)); 1336 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1322 return -EINVAL; 1337 return -EINVAL;
@@ -1658,6 +1673,7 @@ int __init ap_module_init(void)
1658 */ 1673 */
1659 if (MACHINE_IS_VM) 1674 if (MACHINE_IS_VM)
1660 poll_timeout = 1500000; 1675 poll_timeout = 1500000;
1676 spin_lock_init(&ap_poll_timer_lock);
1661 hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 1677 hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1662 ap_poll_timer.function = ap_poll_timeout; 1678 ap_poll_timer.function = ap_poll_timeout;
1663 1679
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index a35362241805..4785d07cd447 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -87,6 +87,7 @@ struct ap_queue_status {
87#define AP_RESPONSE_INDEX_TOO_BIG 0x11 87#define AP_RESPONSE_INDEX_TOO_BIG 0x11
88#define AP_RESPONSE_NO_FIRST_PART 0x13 88#define AP_RESPONSE_NO_FIRST_PART 0x13
89#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15 89#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
90#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
90 91
91/* 92/*
92 * Known device types 93 * Known device types
@@ -96,8 +97,8 @@ struct ap_queue_status {
96#define AP_DEVICE_TYPE_PCIXCC 5 97#define AP_DEVICE_TYPE_PCIXCC 5
97#define AP_DEVICE_TYPE_CEX2A 6 98#define AP_DEVICE_TYPE_CEX2A 6
98#define AP_DEVICE_TYPE_CEX2C 7 99#define AP_DEVICE_TYPE_CEX2C 7
99#define AP_DEVICE_TYPE_CEX2A2 8 100#define AP_DEVICE_TYPE_CEX3A 8
100#define AP_DEVICE_TYPE_CEX2C2 9 101#define AP_DEVICE_TYPE_CEX3C 9
101 102
102/* 103/*
103 * AP reset flag states 104 * AP reset flag states
@@ -161,12 +162,25 @@ struct ap_message {
161 size_t length; /* Message length. */ 162 size_t length; /* Message length. */
162 163
163 void *private; /* ap driver private pointer. */ 164 void *private; /* ap driver private pointer. */
165 unsigned int special:1; /* Used for special commands. */
164}; 166};
165 167
166#define AP_DEVICE(dt) \ 168#define AP_DEVICE(dt) \
167 .dev_type=(dt), \ 169 .dev_type=(dt), \
168 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, 170 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
169 171
172/**
173 * ap_init_message() - Initialize ap_message.
 174 * Initialize a message before using it. Otherwise this might result in
175 * unexpected behaviour.
176 */
177static inline void ap_init_message(struct ap_message *ap_msg)
178{
179 ap_msg->psmid = 0;
180 ap_msg->length = 0;
181 ap_msg->special = 0;
182}
183
170/* 184/*
171 * Note: don't use ap_send/ap_recv after using ap_queue_message 185 * Note: don't use ap_send/ap_recv after using ap_queue_message
172 * for the first time. Otherwise the ap message queue will get 186 * for the first time. Otherwise the ap message queue will get
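Note: since the special flag extends a structure that the card drivers
allocate themselves, every sender must now start from a known state, which is
what ap_init_message() provides. A minimal sketch of the intended call
pattern (buffer and id names here are hypothetical):

	struct ap_message ap_msg;

	ap_init_message(&ap_msg);	/* zeroes psmid, length and special */
	ap_msg.message = msg_buf;	/* driver-provided request buffer */
	ap_msg.length = msg_len;
	ap_msg.psmid = psmid;		/* program supplied message id */
	ap_queue_message(ap_dev, &ap_msg);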
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 65b6a96afe6b..304caf549973 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -33,8 +33,10 @@
33#include <linux/miscdevice.h> 33#include <linux/miscdevice.h>
34#include <linux/fs.h> 34#include <linux/fs.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
36#include <linux/compat.h> 37#include <linux/compat.h>
37#include <linux/smp_lock.h> 38#include <linux/smp_lock.h>
39#include <linux/slab.h>
38#include <asm/atomic.h> 40#include <asm/atomic.h>
39#include <asm/uaccess.h> 41#include <asm/uaccess.h>
40#include <linux/hw_random.h> 42#include <linux/hw_random.h>
@@ -299,9 +301,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
299 */ 301 */
300static int zcrypt_open(struct inode *inode, struct file *filp) 302static int zcrypt_open(struct inode *inode, struct file *filp)
301{ 303{
302 lock_kernel();
303 atomic_inc(&zcrypt_open_count); 304 atomic_inc(&zcrypt_open_count);
304 unlock_kernel();
305 return 0; 305 return 0;
306} 306}
307 307
@@ -395,10 +395,12 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
395 * u_mult_inv > 128 bytes. 395 * u_mult_inv > 128 bytes.
396 */ 396 */
397 if (copied == 0) { 397 if (copied == 0) {
398 int len; 398 unsigned int len;
399 spin_unlock_bh(&zcrypt_device_lock); 399 spin_unlock_bh(&zcrypt_device_lock);
400 /* len is max 256 / 2 - 120 = 8 */ 400 /* len is max 256 / 2 - 120 = 8 */
401 len = crt->inputdatalength / 2 - 120; 401 len = crt->inputdatalength / 2 - 120;
402 if (len > sizeof(z1))
403 return -EFAULT;
402 z1 = z2 = z3 = 0; 404 z1 = z2 = z3 = 0;
403 if (copy_from_user(&z1, crt->np_prime, len) || 405 if (copy_from_user(&z1, crt->np_prime, len) ||
404 copy_from_user(&z2, crt->bp_key, len) || 406 copy_from_user(&z2, crt->bp_key, len) ||
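Note: the added bound closes a small hole: inputdatalength is user-
controlled, and without the check a crafted value could make the
copy_from_user() calls write past the fixed-size z1/z2/z3 temporaries;
oversized requests are now rejected before any copy takes place.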
@@ -912,122 +914,105 @@ static struct miscdevice zcrypt_misc_device = {
912 */ 914 */
913static struct proc_dir_entry *zcrypt_entry; 915static struct proc_dir_entry *zcrypt_entry;
914 916
915static int sprintcl(unsigned char *outaddr, unsigned char *addr, 917static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
916 unsigned int len)
917{ 918{
918 int hl, i; 919 int i;
919 920
920 hl = 0;
921 for (i = 0; i < len; i++) 921 for (i = 0; i < len; i++)
922 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); 922 seq_printf(m, "%01x", (unsigned int) addr[i]);
923 hl += sprintf(outaddr+hl, " "); 923 seq_putc(m, ' ');
924 return hl;
925} 924}
926 925
927static int sprintrw(unsigned char *outaddr, unsigned char *addr, 926static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
928 unsigned int len)
929{ 927{
930 int hl, inl, c, cx; 928 int inl, c, cx;
931 929
932 hl = sprintf(outaddr, " "); 930 seq_printf(m, " ");
933 inl = 0; 931 inl = 0;
934 for (c = 0; c < (len / 16); c++) { 932 for (c = 0; c < (len / 16); c++) {
935 hl += sprintcl(outaddr+hl, addr+inl, 16); 933 sprintcl(m, addr+inl, 16);
936 inl += 16; 934 inl += 16;
937 } 935 }
938 cx = len%16; 936 cx = len%16;
939 if (cx) { 937 if (cx) {
940 hl += sprintcl(outaddr+hl, addr+inl, cx); 938 sprintcl(m, addr+inl, cx);
941 inl += cx; 939 inl += cx;
942 } 940 }
943 hl += sprintf(outaddr+hl, "\n"); 941 seq_putc(m, '\n');
944 return hl;
945} 942}
946 943
947static int sprinthx(unsigned char *title, unsigned char *outaddr, 944static void sprinthx(unsigned char *title, struct seq_file *m,
948 unsigned char *addr, unsigned int len) 945 unsigned char *addr, unsigned int len)
949{ 946{
950 int hl, inl, r, rx; 947 int inl, r, rx;
951 948
952 hl = sprintf(outaddr, "\n%s\n", title); 949 seq_printf(m, "\n%s\n", title);
953 inl = 0; 950 inl = 0;
954 for (r = 0; r < (len / 64); r++) { 951 for (r = 0; r < (len / 64); r++) {
955 hl += sprintrw(outaddr+hl, addr+inl, 64); 952 sprintrw(m, addr+inl, 64);
956 inl += 64; 953 inl += 64;
957 } 954 }
958 rx = len % 64; 955 rx = len % 64;
959 if (rx) { 956 if (rx) {
960 hl += sprintrw(outaddr+hl, addr+inl, rx); 957 sprintrw(m, addr+inl, rx);
961 inl += rx; 958 inl += rx;
962 } 959 }
963 hl += sprintf(outaddr+hl, "\n"); 960 seq_putc(m, '\n');
964 return hl;
965} 961}
966 962
967static int sprinthx4(unsigned char *title, unsigned char *outaddr, 963static void sprinthx4(unsigned char *title, struct seq_file *m,
968 unsigned int *array, unsigned int len) 964 unsigned int *array, unsigned int len)
969{ 965{
970 int hl, r; 966 int r;
971 967
972 hl = sprintf(outaddr, "\n%s\n", title); 968 seq_printf(m, "\n%s\n", title);
973 for (r = 0; r < len; r++) { 969 for (r = 0; r < len; r++) {
974 if ((r % 8) == 0) 970 if ((r % 8) == 0)
975 hl += sprintf(outaddr+hl, " "); 971 seq_printf(m, " ");
976 hl += sprintf(outaddr+hl, "%08X ", array[r]); 972 seq_printf(m, "%08X ", array[r]);
977 if ((r % 8) == 7) 973 if ((r % 8) == 7)
978 hl += sprintf(outaddr+hl, "\n"); 974 seq_putc(m, '\n');
979 } 975 }
980 hl += sprintf(outaddr+hl, "\n"); 976 seq_putc(m, '\n');
981 return hl;
982} 977}
983 978
984static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, 979static int zcrypt_proc_show(struct seq_file *m, void *v)
985 int count, int *eof, void *data)
986{ 980{
987 unsigned char *workarea; 981 char workarea[sizeof(int) * AP_DEVICES];
988 int len; 982
989 983 seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
990 len = 0; 984 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
991 985 seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
992 /* resp_buff is a page. Use the right half for a work area */ 986 seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
993 workarea = resp_buff + 2000; 987 seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
994 len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", 988 seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
995 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); 989 seq_printf(m, "PCIXCC MCL2 count: %d\n",
996 len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", 990 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
997 ap_domain_index); 991 seq_printf(m, "PCIXCC MCL3 count: %d\n",
998 len += sprintf(resp_buff + len, "Total device count: %d\n", 992 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
999 zcrypt_device_count); 993 seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
1000 len += sprintf(resp_buff + len, "PCICA count: %d\n", 994 seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
1001 zcrypt_count_type(ZCRYPT_PCICA)); 995 seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
1002 len += sprintf(resp_buff + len, "PCICC count: %d\n", 996 seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
1003 zcrypt_count_type(ZCRYPT_PCICC)); 997 seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
1004 len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", 998 seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
1005 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); 999 seq_printf(m, "Total open handles: %d\n\n",
1006 len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", 1000 atomic_read(&zcrypt_open_count));
1007 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
1008 len += sprintf(resp_buff + len, "CEX2C count: %d\n",
1009 zcrypt_count_type(ZCRYPT_CEX2C));
1010 len += sprintf(resp_buff + len, "CEX2A count: %d\n",
1011 zcrypt_count_type(ZCRYPT_CEX2A));
1012 len += sprintf(resp_buff + len, "requestq count: %d\n",
1013 zcrypt_requestq_count());
1014 len += sprintf(resp_buff + len, "pendingq count: %d\n",
1015 zcrypt_pendingq_count());
1016 len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
1017 atomic_read(&zcrypt_open_count));
1018 zcrypt_status_mask(workarea); 1001 zcrypt_status_mask(workarea);
1019 len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " 1002 sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
1020 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", 1003 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
1021 resp_buff+len, workarea, AP_DEVICES); 1004 m, workarea, AP_DEVICES);
1022 zcrypt_qdepth_mask(workarea); 1005 zcrypt_qdepth_mask(workarea);
1023 len += sprinthx("Waiting work element counts", 1006 sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
1024 resp_buff+len, workarea, AP_DEVICES);
1025 zcrypt_perdev_reqcnt((int *) workarea); 1007 zcrypt_perdev_reqcnt((int *) workarea);
1026 len += sprinthx4("Per-device successfully completed request counts", 1008 sprinthx4("Per-device successfully completed request counts",
1027 resp_buff+len,(unsigned int *) workarea, AP_DEVICES); 1009 m, (unsigned int *) workarea, AP_DEVICES);
1028 *eof = 1; 1010 return 0;
1029 memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); 1011}
1030 return len; 1012
1013static int zcrypt_proc_open(struct inode *inode, struct file *file)
1014{
1015 return single_open(file, zcrypt_proc_show, NULL);
1031} 1016}
1032 1017
1033static void zcrypt_disable_card(int index) 1018static void zcrypt_disable_card(int index)
@@ -1057,11 +1042,11 @@ static void zcrypt_enable_card(int index)
1057 spin_unlock_bh(&zcrypt_device_lock); 1042 spin_unlock_bh(&zcrypt_device_lock);
1058} 1043}
1059 1044
1060static int zcrypt_status_write(struct file *file, const char __user *buffer, 1045static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
1061 unsigned long count, void *data) 1046 size_t count, loff_t *pos)
1062{ 1047{
1063 unsigned char *lbuf, *ptr; 1048 unsigned char *lbuf, *ptr;
1064 unsigned long local_count; 1049 size_t local_count;
1065 int j; 1050 int j;
1066 1051
1067 if (count <= 0) 1052 if (count <= 0)
@@ -1095,8 +1080,9 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
1095 * '0' for no device, '1' for PCICA, '2' for PCICC, 1080 * '0' for no device, '1' for PCICA, '2' for PCICC,
1096 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, 1081 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1097 * '5' for CEX2C and '6' for CEX2A' 1082 * '5' for CEX2C and '6' for CEX2A'
1083 * '7' for CEX3C and '8' for CEX3A
1098 */ 1084 */
1099 if (*ptr >= '0' && *ptr <= '6') 1085 if (*ptr >= '0' && *ptr <= '8')
1100 j++; 1086 j++;
1101 else if (*ptr == 'd' || *ptr == 'D') 1087 else if (*ptr == 'd' || *ptr == 'D')
1102 zcrypt_disable_card(j++); 1088 zcrypt_disable_card(j++);
@@ -1110,6 +1096,15 @@ out:
1110 return count; 1096 return count;
1111} 1097}
1112 1098
1099static const struct file_operations zcrypt_proc_fops = {
1100 .owner = THIS_MODULE,
1101 .open = zcrypt_proc_open,
1102 .read = seq_read,
1103 .llseek = seq_lseek,
1104 .release = single_release,
1105 .write = zcrypt_proc_write,
1106};
1107
1113static int zcrypt_rng_device_count; 1108static int zcrypt_rng_device_count;
1114static u32 *zcrypt_rng_buffer; 1109static u32 *zcrypt_rng_buffer;
1115static int zcrypt_rng_buffer_index; 1110static int zcrypt_rng_buffer_index;
@@ -1192,14 +1187,11 @@ int __init zcrypt_api_init(void)
1192 goto out; 1187 goto out;
1193 1188
1194 /* Set up the proc file system */ 1189 /* Set up the proc file system */
1195 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); 1190 zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
1196 if (!zcrypt_entry) { 1191 if (!zcrypt_entry) {
1197 rc = -ENOMEM; 1192 rc = -ENOMEM;
1198 goto out_misc; 1193 goto out_misc;
1199 } 1194 }
1200 zcrypt_entry->data = NULL;
1201 zcrypt_entry->read_proc = zcrypt_status_read;
1202 zcrypt_entry->write_proc = zcrypt_status_write;
1203 1195
1204 return 0; 1196 return 0;
1205 1197
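The zcrypt_api.c hunks above swap the old read_proc/write_proc pair for the seq_file interface: each sprint* helper stops returning byte counts and writes straight into the seq_file, and proc_create() attaches a file_operations table instead of poking entry fields after creation. For reference, a minimal sketch of the same single_open pattern under the file_operations-based proc API of this kernel generation; all example_* names are hypothetical:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Show callback: seq_printf() handles buffering, so there is no manual
 * length bookkeeping as in the removed read_proc handler. */
static int example_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "example value: %d\n", 42);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	/* proc_create() wires up the fops in one call; nothing is assigned
	 * to the proc_dir_entry after creation. */
	if (!proc_create("driver/example", 0444, NULL, &example_proc_fops))
		return -ENOMEM;
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");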
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 1d1ec74dadb2..8e7ffbf2466c 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -71,6 +71,8 @@ struct ica_z90_status {
 #define ZCRYPT_PCIXCC_MCL3	4
 #define ZCRYPT_CEX2C		5
 #define ZCRYPT_CEX2A		6
+#define ZCRYPT_CEX3C		7
+#define ZCRYPT_CEX3A		8
 
 /**
  * Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 326ea08f67c9..9c409efa1ecf 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -27,6 +27,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/err.h>
 #include <asm/atomic.h>
@@ -39,17 +40,24 @@
 
 #define CEX2A_MIN_MOD_SIZE	1	/* 8 bits */
 #define CEX2A_MAX_MOD_SIZE	256	/* 2048 bits */
+#define CEX3A_MIN_MOD_SIZE	CEX2A_MIN_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE	CEX2A_MAX_MOD_SIZE
 
 #define CEX2A_SPEED_RATING	970
+#define CEX3A_SPEED_RATING	900 /* Fixme: Needs finetuning */
 
 #define CEX2A_MAX_MESSAGE_SIZE	0x390	/* sizeof(struct type50_crb2_msg) */
 #define CEX2A_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */
 
+#define CEX3A_MAX_MESSAGE_SIZE	CEX2A_MAX_MESSAGE_SIZE
+#define CEX3A_MAX_RESPONSE_SIZE	CEX2A_MAX_RESPONSE_SIZE
+
 #define CEX2A_CLEANUP_TIME	(15*HZ)
+#define CEX3A_CLEANUP_TIME	CEX2A_CLEANUP_TIME
 
 static struct ap_device_id zcrypt_cex2a_ids[] = {
 	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
 	{ /* end of list */ },
 };
 
@@ -298,6 +306,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -335,6 +344,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -373,31 +383,45 @@ static struct zcrypt_ops zcrypt_cex2a_ops = {
  */
 static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev;
-	int rc;
+	struct zcrypt_device *zdev = NULL;
+	int rc = 0;
 
-	zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
-	if (!zdev)
-		return -ENOMEM;
-	zdev->ap_dev = ap_dev;
-	zdev->ops = &zcrypt_cex2a_ops;
-	zdev->online = 1;
-	zdev->user_space_type = ZCRYPT_CEX2A;
-	zdev->type_string = "CEX2A";
-	zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
-	zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
-	zdev->short_crt = 1;
-	zdev->speed_rating = CEX2A_SPEED_RATING;
-	ap_dev->reply = &zdev->reply;
-	ap_dev->private = zdev;
-	rc = zcrypt_device_register(zdev);
-	if (rc)
-		goto out_free;
-	return 0;
-
-out_free:
-	ap_dev->private = NULL;
-	zcrypt_device_free(zdev);
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_CEX2A:
+		zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
+		if (!zdev)
+			return -ENOMEM;
+		zdev->user_space_type = ZCRYPT_CEX2A;
+		zdev->type_string = "CEX2A";
+		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		zdev->short_crt = 1;
+		zdev->speed_rating = CEX2A_SPEED_RATING;
+		break;
+	case AP_DEVICE_TYPE_CEX3A:
+		zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
+		if (!zdev)
+			return -ENOMEM;
+		zdev->user_space_type = ZCRYPT_CEX3A;
+		zdev->type_string = "CEX3A";
+		zdev->min_mod_size = CEX3A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+		zdev->short_crt = 1;
+		zdev->speed_rating = CEX3A_SPEED_RATING;
+		break;
+	}
+	if (zdev != NULL) {
+		zdev->ap_dev = ap_dev;
+		zdev->ops = &zcrypt_cex2a_ops;
+		zdev->online = 1;
+		ap_dev->reply = &zdev->reply;
+		ap_dev->private = zdev;
+		rc = zcrypt_device_register(zdev);
+	}
+	if (rc) {
+		ap_dev->private = NULL;
+		zcrypt_device_free(zdev);
+	}
 	return rc;
 }
 
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 17ba81b58c78..09e934b295a0 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -27,6 +27,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/err.h>
 #include <asm/atomic.h>
@@ -281,6 +282,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -318,6 +320,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index f4b0c4795434..9dec5c77cff4 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -28,6 +28,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/gfp.h>
 #include <linux/err.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -373,6 +374,8 @@ static int convert_type86(struct zcrypt_device *zdev,
 			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
 			return -EAGAIN;
 		}
+		if (service_rc == 8 && service_rs == 72)
+			return -EINVAL;
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
@@ -483,6 +486,7 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -521,6 +525,7 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 5677b40e4ac0..510fab4577d4 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
@@ -43,10 +44,13 @@
 #define PCIXCC_MIN_MOD_SIZE	16	/* 128 bits */
 #define PCIXCC_MIN_MOD_SIZE_OLD	64	/* 512 bits */
 #define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits */
+#define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE	PCIXCC_MAX_MOD_SIZE
 
-#define PCIXCC_MCL2_SPEED_RATING	7870	/* FIXME: needs finetuning */
+#define PCIXCC_MCL2_SPEED_RATING	7870
 #define PCIXCC_MCL3_SPEED_RATING	7870
-#define CEX2C_SPEED_RATING		8540
+#define CEX2C_SPEED_RATING		7000
+#define CEX3C_SPEED_RATING		6500	/* FIXME: needs finetuning */
 
 #define PCIXCC_MAX_ICA_MESSAGE_SIZE	0x77c	/* max size type6 v2 crt message */
 #define PCIXCC_MAX_ICA_RESPONSE_SIZE	0x77c	/* max size type86 v2 reply */
@@ -72,7 +76,7 @@ struct response_type {
 static struct ap_device_id zcrypt_pcixcc_ids[] = {
 	{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
 	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
 	{ /* end of list */ },
 };
 
@@ -326,6 +330,11 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
 	function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
 	memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
 
+	if (memcmp(function_code, "US", 2) == 0)
+		ap_msg->special = 1;
+	else
+		ap_msg->special = 0;
+
 	/* copy data block */
 	if (xcRB->request_data_length &&
 	    copy_from_user(req_data, xcRB->request_data_address,
@@ -462,6 +471,8 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
 		}
 		if (service_rc == 12 && service_rs == 769)
 			return -EINVAL;
+		if (service_rc == 8 && service_rs == 72)
+			return -EINVAL;
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
@@ -688,6 +699,7 @@ static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -727,6 +739,7 @@ static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -766,6 +779,7 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -805,6 +819,7 @@ static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -972,6 +987,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
 	} __attribute__((packed)) *reply;
 	int rc, i;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -1016,14 +1032,15 @@ out_free:
 static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
 {
 	struct zcrypt_device *zdev;
-	int rc;
+	int rc = 0;
 
 	zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
 	if (!zdev)
 		return -ENOMEM;
 	zdev->ap_dev = ap_dev;
 	zdev->online = 1;
-	if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_PCIXCC:
 		rc = zcrypt_pcixcc_mcl(ap_dev);
 		if (rc < 0) {
 			zcrypt_device_free(zdev);
@@ -1041,13 +1058,25 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
 			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
 		}
-	} else {
+		break;
+	case AP_DEVICE_TYPE_CEX2C:
 		zdev->user_space_type = ZCRYPT_CEX2C;
 		zdev->type_string = "CEX2C";
 		zdev->speed_rating = CEX2C_SPEED_RATING;
 		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+		break;
+	case AP_DEVICE_TYPE_CEX3C:
+		zdev->user_space_type = ZCRYPT_CEX3C;
+		zdev->type_string = "CEX3C";
+		zdev->speed_rating = CEX3C_SPEED_RATING;
+		zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
+		break;
+	default:
+		goto out_free;
 	}
+
 	rc = zcrypt_pcixcc_rng_supported(ap_dev);
 	if (rc < 0) {
 		zcrypt_device_free(zdev);
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 2930fc763ac5..4e298bc8949d 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -15,6 +15,7 @@
 #include <linux/err.h>
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>
+#include <linux/slab.h>
 #include <linux/virtio_console.h>
 #include <linux/interrupt.h>
 #include <linux/virtio_ring.h>
@@ -340,11 +341,11 @@ static void kvm_extint_handler(u16 code)
 		return;
 
 	/* The LSB might be overloaded, we have to mask it */
-	vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL);
+	vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL);
 
 	/* We use the LSB of extparam, to decide, if this interrupt is a config
 	 * change or a "standard" interrupt */
-	config_changed = (*(int *) __LC_EXT_PARAMS & 1);
+	config_changed = S390_lowcore.ext_params & 1;
 
 	if (config_changed) {
 		struct virtio_driver *drv;
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index cb909a5b5047..977bb4d4ed15 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -43,6 +43,16 @@ config SMSGIUCV
 	  Select this option if you want to be able to receive SMSG messages
 	  from other VM guest systems.
 
+config SMSGIUCV_EVENT
+	tristate "Deliver IUCV special messages as uevents (VM only)"
+	depends on SMSGIUCV
+	help
+	  Select this option to deliver CP special messages (SMSGs) as
+	  uevents.  The driver handles only those special messages that
+	  start with "APP".
+
+	  To compile as a module, choose M. The module name is "smsgiucv_app".
+
 config CLAW
 	tristate "CLAW device support"
 	depends on CCW && NETDEVICES
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 96eddb3b1d08..4dfe8c1092da 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -3,11 +3,12 @@
 #
 
 ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
-obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
 obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
-obj-$(CONFIG_LCS) += lcs.o cu3088.o
-obj-$(CONFIG_CLAW) += claw.o cu3088.o
+obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
+obj-$(CONFIG_LCS) += lcs.o
+obj-$(CONFIG_CLAW) += claw.o
 qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
 obj-$(CONFIG_QETH) += qeth.o
 qeth_l2-y += qeth_l2_main.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c63babefb698..147bb1a69aba 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -90,7 +90,6 @@
 #include <linux/timer.h>
 #include <linux/types.h>
 
-#include "cu3088.h"
 #include "claw.h"
 
 /*
@@ -258,6 +257,9 @@ static int claw_pm_prepare(struct ccwgroup_device *gdev)
 	return -EPERM;
 }
 
+/* the root device for claw group devices */
+static struct device *claw_root_dev;
+
 /* ccwgroup table */
 
 static struct ccwgroup_driver claw_group_driver = {
@@ -272,6 +274,47 @@ static struct ccwgroup_driver claw_group_driver = {
 	.prepare     = claw_pm_prepare,
 };
 
+static struct ccw_device_id claw_ids[] = {
+	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, claw_ids);
+
+static struct ccw_driver claw_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "claw",
+	.ids	= claw_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
+static ssize_t
+claw_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	int err;
+	err = ccwgroup_create_from_string(claw_root_dev,
+					  claw_group_driver.driver_id,
+					  &claw_ccw_driver, 3, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
+
+static struct attribute *claw_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group claw_group_attr_group = {
+	.attrs = claw_group_attrs,
+};
+
+static const struct attribute_group *claw_group_attr_groups[] = {
+	&claw_group_attr_group,
+	NULL,
+};
+
 /*
 *       Key functions
 */
@@ -3326,7 +3369,11 @@ claw_remove_files(struct device *dev)
 static void __exit
 claw_cleanup(void)
 {
-	unregister_cu3088_discipline(&claw_group_driver);
+	driver_remove_file(&claw_group_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&claw_group_driver);
+	ccw_driver_unregister(&claw_ccw_driver);
+	root_device_unregister(claw_root_dev);
 	claw_unregister_debug_facility();
 	pr_info("Driver unloaded\n");
 
@@ -3348,16 +3395,31 @@ claw_init(void)
 	if (ret) {
 		pr_err("Registering with the S/390 debug feature"
 			" failed with error code %d\n", ret);
-		return ret;
+		goto out_err;
 	}
 	CLAW_DBF_TEXT(2, setup, "init_mod");
-	ret = register_cu3088_discipline(&claw_group_driver);
-	if (ret) {
-		CLAW_DBF_TEXT(2, setup, "init_bad");
-		claw_unregister_debug_facility();
-		pr_err("Registering with the cu3088 device driver failed "
-			"with error code %d\n", ret);
-	}
+	claw_root_dev = root_device_register("claw");
+	ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&claw_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	claw_group_driver.driver.groups = claw_group_attr_groups;
+	ret = ccwgroup_driver_register(&claw_group_driver);
+	if (ret)
+		goto ccwgroup_err;
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&claw_ccw_driver);
+ccw_err:
+	root_device_unregister(claw_root_dev);
+register_err:
+	CLAW_DBF_TEXT(2, setup, "init_bad");
+	claw_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the claw device driver failed\n");
 	return ret;
 }
 
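With cu3088.c removed (its deletion appears further below), claw, ctcm and lcs each register the same three objects themselves: a root device to group their ccwgroup devices under, their own ccw_driver for the 3088 control unit, and a driver-level "group" sysfs attribute. The init hunk above also shows the matching unwind order. A condensed, illustrative sketch of that sequence; the example_* names are placeholders, and the driver structures' fields are elided:

static struct device *example_root_dev;
static struct ccw_driver example_ccw_driver;		/* ids/probe elided */
static struct ccwgroup_driver example_group_driver;	/* callbacks elided */
static const struct attribute_group *example_group_attr_groups[] = {
	NULL,	/* would list the group holding the "group" attribute */
};

static int __init example_init(void)
{
	int ret;

	example_root_dev = root_device_register("example");
	ret = IS_ERR(example_root_dev) ? PTR_ERR(example_root_dev) : 0;
	if (ret)
		return ret;
	ret = ccw_driver_register(&example_ccw_driver);
	if (ret)
		goto out_root;
	/* attach the attribute groups before registration so the "group"
	 * file appears atomically with the driver */
	example_group_driver.driver.groups = example_group_attr_groups;
	ret = ccwgroup_driver_register(&example_group_driver);
	if (ret)
		goto out_ccw;
	return 0;

out_ccw:
	ccw_driver_unregister(&example_ccw_driver);
out_root:
	root_device_unregister(example_root_dev);
	return ret;
}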
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 005072c420d3..46d59a13db12 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -129,6 +129,18 @@ static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
 	} \
 } while (0)
 
+/**
+ * Enum for classifying detected devices.
+ */
+enum claw_channel_types {
+	/* Device is not a channel */
+	claw_channel_type_none,
+
+	/* Device is a CLAW channel device */
+	claw_channel_type_claw
+};
+
+
 /*******************************************************
 *       Define Control Blocks                          *
 *                                                      *
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index 1ca58f153470..d962fd741a23 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -10,7 +10,6 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/sysctl.h>
 #include <linux/module.h>
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 4ded9ac2c5ef..70eb7f138414 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -44,7 +44,6 @@
 #include <asm/idals.h>
 
 #include "fsm.h"
-#include "cu3088.h"
 
 #include "ctcm_dbug.h"
 #include "ctcm_main.h"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 2326aba9807a..046d077fabbb 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -39,7 +39,6 @@
 #include <asm/idals.h>
 
 #include "fsm.h"
-#include "cu3088.h"
 #include "ctcm_main.h"
 
 /*
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index c5b83874500c..e35713dd0504 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -51,12 +51,16 @@
 
 #include <asm/idals.h>
 
-#include "cu3088.h"
 #include "ctcm_fsms.h"
 #include "ctcm_main.h"
 
 /* Some common global variables */
 
+/**
+ * The root device for ctcm group devices
+ */
+static struct device *ctcm_root_dev;
+
 /*
  * Linked list of all detected channels.
  */
@@ -246,7 +250,7 @@ static void channel_remove(struct channel *ch)
  *
  * returns Pointer to a channel or NULL if no matching channel available.
  */
-static struct channel *channel_get(enum channel_types type,
+static struct channel *channel_get(enum ctcm_channel_types type,
 					char *id, int direction)
 {
 	struct channel *ch = channels;
@@ -1342,7 +1346,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
 *
 * returns 0 on success, !0 on error.
 */
-static int add_channel(struct ccw_device *cdev, enum channel_types type,
+static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
 			struct ctcm_priv *priv)
 {
 	struct channel **c = &channels;
@@ -1501,13 +1505,13 @@ free_return:	/* note that all channel pointers are 0 or valid */
 /*
  * Return type of a detected device.
  */
-static enum channel_types get_channel_type(struct ccw_device_id *id)
+static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
 {
-	enum channel_types type;
-	type = (enum channel_types)id->driver_info;
+	enum ctcm_channel_types type;
+	type = (enum ctcm_channel_types)id->driver_info;
 
-	if (type == channel_type_ficon)
-		type = channel_type_escon;
+	if (type == ctcm_channel_type_ficon)
+		type = ctcm_channel_type_escon;
 
 	return type;
 }
@@ -1525,16 +1529,21 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	char read_id[CTCM_ID_SIZE];
 	char write_id[CTCM_ID_SIZE];
 	int direction;
-	enum channel_types type;
+	enum ctcm_channel_types type;
 	struct ctcm_priv *priv;
 	struct net_device *dev;
 	struct ccw_device *cdev0;
 	struct ccw_device *cdev1;
+	struct channel *readc;
+	struct channel *writec;
 	int ret;
+	int result;
 
 	priv = dev_get_drvdata(&cgdev->dev);
-	if (!priv)
-		return -ENODEV;
+	if (!priv) {
+		result = -ENODEV;
+		goto out_err_result;
+	}
 
 	cdev0 = cgdev->cdev[0];
 	cdev1 = cgdev->cdev[1];
@@ -1545,31 +1554,40 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
 
 	ret = add_channel(cdev0, type, priv);
-	if (ret)
-		return ret;
+	if (ret) {
+		result = ret;
+		goto out_err_result;
+	}
 	ret = add_channel(cdev1, type, priv);
-	if (ret)
-		return ret;
+	if (ret) {
+		result = ret;
+		goto out_remove_channel1;
+	}
 
 	ret = ccw_device_set_online(cdev0);
 	if (ret != 0) {
-		/* may be ok to fail now - can be done later */
 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 			"%s(%s) set_online rc=%d",
 				CTCM_FUNTAIL, read_id, ret);
+		result = -EIO;
+		goto out_remove_channel2;
 	}
 
 	ret = ccw_device_set_online(cdev1);
 	if (ret != 0) {
-		/* may be ok to fail now - can be done later */
 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 			"%s(%s) set_online rc=%d",
 				CTCM_FUNTAIL, write_id, ret);
+
+		result = -EIO;
+		goto out_ccw1;
 	}
 
 	dev = ctcm_init_netdevice(priv);
-	if (dev == NULL)
-		goto out;
+	if (dev == NULL) {
+		result = -ENODEV;
+		goto out_ccw2;
+	}
 
 	for (direction = READ; direction <= WRITE; direction++) {
 		priv->channel[direction] =
@@ -1587,12 +1605,14 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	/* sysfs magic */
 	SET_NETDEV_DEV(dev, &cgdev->dev);
 
-	if (register_netdev(dev))
-		goto out_dev;
+	if (register_netdev(dev)) {
+		result = -ENODEV;
+		goto out_dev;
+	}
 
 	if (ctcm_add_attributes(&cgdev->dev)) {
-		unregister_netdev(dev);
-		goto out_dev;
+		result = -ENODEV;
+		goto out_unregister;
 	}
 
 	strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
@@ -1608,13 +1628,22 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 			priv->channel[WRITE]->id, priv->protocol);
 
 	return 0;
+out_unregister:
+	unregister_netdev(dev);
 out_dev:
 	ctcm_free_netdevice(dev);
-out:
+out_ccw2:
 	ccw_device_set_offline(cgdev->cdev[1]);
+out_ccw1:
 	ccw_device_set_offline(cgdev->cdev[0]);
-
-	return -ENODEV;
+out_remove_channel2:
+	readc = channel_get(type, read_id, READ);
+	channel_remove(readc);
+out_remove_channel1:
+	writec = channel_get(type, write_id, WRITE);
+	channel_remove(writec);
+out_err_result:
+	return result;
 }
 
 /**
@@ -1695,6 +1724,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
 		return 0;
 	netif_device_detach(priv->channel[READ]->netdev);
 	ctcm_close(priv->channel[READ]->netdev);
+	if (!wait_event_timeout(priv->fsm->wait_q,
+	    fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
+		netif_device_attach(priv->channel[READ]->netdev);
+		return -EBUSY;
+	}
 	ccw_device_set_offline(gdev->cdev[1]);
 	ccw_device_set_offline(gdev->cdev[0]);
 	return 0;
@@ -1719,6 +1753,22 @@ err_out:
 	return rc;
 }
 
+static struct ccw_device_id ctcm_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, ctcm_ids);
+
+static struct ccw_driver ctcm_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "ctcm",
+	.ids	= ctcm_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
 static struct ccwgroup_driver ctcm_group_driver = {
 	.owner       = THIS_MODULE,
 	.name        = CTC_DRIVER_NAME,
@@ -1733,6 +1783,33 @@ static struct ccwgroup_driver ctcm_group_driver = {
 	.restore     = ctcm_pm_resume,
 };
 
+static ssize_t
+ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	int err;
+
+	err = ccwgroup_create_from_string(ctcm_root_dev,
+					  ctcm_group_driver.driver_id,
+					  &ctcm_ccw_driver, 2, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
+
+static struct attribute *ctcm_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group ctcm_group_attr_group = {
+	.attrs = ctcm_group_attrs,
+};
+
+static const struct attribute_group *ctcm_group_attr_groups[] = {
+	&ctcm_group_attr_group,
+	NULL,
+};
 
 /*
  * Module related routines
@@ -1746,7 +1823,10 @@ static struct ccwgroup_driver ctcm_group_driver = {
 */
 static void __exit ctcm_exit(void)
 {
-	unregister_cu3088_discipline(&ctcm_group_driver);
+	driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
+	ccwgroup_driver_unregister(&ctcm_group_driver);
+	ccw_driver_unregister(&ctcm_ccw_driver);
+	root_device_unregister(ctcm_root_dev);
 	ctcm_unregister_dbf_views();
 	pr_info("CTCM driver unloaded\n");
 }
@@ -1772,17 +1852,31 @@ static int __init ctcm_init(void)
 	channels = NULL;
 
 	ret = ctcm_register_dbf_views();
-	if (ret) {
-		return ret;
-	}
-	ret = register_cu3088_discipline(&ctcm_group_driver);
-	if (ret) {
-		ctcm_unregister_dbf_views();
-		pr_err("%s / register_cu3088_discipline failed, ret = %d\n",
-			__func__, ret);
-		return ret;
-	}
+	if (ret)
+		goto out_err;
+	ctcm_root_dev = root_device_register("ctcm");
+	ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&ctcm_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
+	ret = ccwgroup_driver_register(&ctcm_group_driver);
+	if (ret)
+		goto ccwgroup_err;
 	print_banner();
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&ctcm_ccw_driver);
+ccw_err:
+	root_device_unregister(ctcm_root_dev);
+register_err:
+	ctcm_unregister_dbf_views();
+out_err:
+	pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
+		__func__, ret);
 	return ret;
 }
 
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d925e732b7d8..d34fa14f44e7 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -16,7 +16,6 @@
 #include <linux/netdevice.h>
 
 #include "fsm.h"
-#include "cu3088.h"
 #include "ctcm_dbug.h"
 #include "ctcm_mpc.h"
 
@@ -66,6 +65,23 @@
 			ctcmpc_dumpit(buf, len); \
 	} while (0)
 
+/**
+ * Enum for classifying detected devices
+ */
+enum ctcm_channel_types {
+	/* Device is not a channel */
+	ctcm_channel_type_none,
+
+	/* Device is a CTC/A */
+	ctcm_channel_type_parallel,
+
+	/* Device is a FICON channel */
+	ctcm_channel_type_ficon,
+
+	/* Device is a ESCON channel */
+	ctcm_channel_type_escon
+};
+
 /*
  * CCW commands, used in this driver.
  */
@@ -121,7 +137,7 @@ struct channel {
 	 * Type of this channel.
 	 * CTC/A or Escon for valid channels.
 	 */
-	enum channel_types type;
+	enum ctcm_channel_types type;
 	/*
 	 * Misc. flags. See CHANNEL_FLAGS_... below
 	 */
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 781e18be7e8f..5978b390153f 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -53,7 +53,6 @@
 #include <linux/moduleparam.h>
 #include <asm/idals.h>
 
-#include "cu3088.h"
 #include "ctcm_mpc.h"
 #include "ctcm_main.h"
 #include "ctcm_fsms.h"
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8452bb052d68..2b24550e865e 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/sysfs.h>
+#include <linux/slab.h>
 #include "ctcm_main.h"
 
 /*
@@ -158,6 +159,15 @@ static ssize_t ctcm_proto_store(struct device *dev,
 	return count;
 }
 
+const char *ctcm_type[] = {
+	"not a channel",
+	"CTC/A",
+	"FICON channel",
+	"ESCON channel",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
 static ssize_t ctcm_type_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
@@ -168,7 +178,7 @@ static ssize_t ctcm_type_show(struct device *dev,
 		return -ENODEV;
 
 	return sprintf(buf, "%s\n",
-			cu3088_type[cgdev->cdev[0]->id.driver_info]);
+			ctcm_type[cgdev->cdev[0]->id.driver_info]);
 }
 
 static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
deleted file mode 100644
index 48383459e99b..000000000000
--- a/drivers/s390/net/cu3088.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * CTC / LCS ccw_device driver
- *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Arnd Bergmann <arndb@de.ibm.com>
- *            Cornelia Huck <cornelia.huck@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/err.h>
-
-#include <asm/ccwdev.h>
-#include <asm/ccwgroup.h>
-
-#include "cu3088.h"
-
-const char *cu3088_type[] = {
-	"not a channel",
-	"CTC/A",
-	"ESCON channel",
-	"FICON channel",
-	"OSA LCS card",
-	"CLAW channel device",
-	"unknown channel type",
-	"unsupported channel type",
-};
-
-/* static definitions */
-
-static struct ccw_device_id cu3088_ids[] = {
-	{ CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
-	{ CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
-	{ CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
-	{ CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
-	{ CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
-	{ /* end of list */ }
-};
-
-static struct ccw_driver cu3088_driver;
-
-static struct device *cu3088_root_dev;
-
-static ssize_t
-group_write(struct device_driver *drv, const char *buf, size_t count)
-{
-	int ret;
-	struct ccwgroup_driver *cdrv;
-
-	cdrv = to_ccwgroupdrv(drv);
-	if (!cdrv)
-		return -EINVAL;
-	ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
-					  &cu3088_driver, 2, buf);
-
-	return (ret == 0) ? count : ret;
-}
-
-static DRIVER_ATTR(group, 0200, NULL, group_write);
-
-/* Register-unregister for ctc&lcs */
-int
-register_cu3088_discipline(struct ccwgroup_driver *dcp)
-{
-	int rc;
-
-	if (!dcp)
-		return -EINVAL;
-
-	/* Register discipline.*/
-	rc = ccwgroup_driver_register(dcp);
-	if (rc)
-		return rc;
-
-	rc = driver_create_file(&dcp->driver, &driver_attr_group);
-	if (rc)
-		ccwgroup_driver_unregister(dcp);
-
-	return rc;
-
-}
-
-void
-unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
-{
-	if (!dcp)
-		return;
-
-	driver_remove_file(&dcp->driver, &driver_attr_group);
-	ccwgroup_driver_unregister(dcp);
-}
-
-static struct ccw_driver cu3088_driver = {
-	.owner	= THIS_MODULE,
-	.ids	= cu3088_ids,
-	.name	= "cu3088",
-	.probe	= ccwgroup_probe_ccwdev,
-	.remove	= ccwgroup_remove_ccwdev,
-};
-
-/* module setup */
-static int __init
-cu3088_init (void)
-{
-	int rc;
-
-	cu3088_root_dev = root_device_register("cu3088");
-	if (IS_ERR(cu3088_root_dev))
-		return PTR_ERR(cu3088_root_dev);
-	rc = ccw_driver_register(&cu3088_driver);
-	if (rc)
-		root_device_unregister(cu3088_root_dev);
-
-	return rc;
-}
-
-static void __exit
-cu3088_exit (void)
-{
-	ccw_driver_unregister(&cu3088_driver);
-	root_device_unregister(cu3088_root_dev);
-}
-
-MODULE_DEVICE_TABLE(ccw,cu3088_ids);
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_LICENSE("GPL");
-
-module_init(cu3088_init);
-module_exit(cu3088_exit);
-
-EXPORT_SYMBOL_GPL(cu3088_type);
-EXPORT_SYMBOL_GPL(register_cu3088_discipline);
-EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
deleted file mode 100644
index d8558a7105a5..000000000000
--- a/drivers/s390/net/cu3088.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _CU3088_H
-#define _CU3088_H
-
-/**
- * Enum for classifying detected devices.
- */
-enum channel_types {
-	/* Device is not a channel */
-	channel_type_none,
-
-	/* Device is a CTC/A */
-	channel_type_parallel,
-
-	/* Device is a ESCON channel */
-	channel_type_escon,
-
-	/* Device is a FICON channel */
-	channel_type_ficon,
-
-	/* Device is a OSA2 card */
-	channel_type_osa2,
-
-	/* Device is a CLAW channel device */
-	channel_type_claw,
-
-	/* Device is a channel, but we don't know
-	 * anything about it */
-	channel_type_unknown,
-
-	/* Device is an unsupported model */
-	channel_type_unsupported,
-
-	/* number of type entries */
-	num_channel_types
-};
-
-extern const char *cu3088_type[num_channel_types];
-extern int register_cu3088_discipline(struct ccwgroup_driver *);
-extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
-
-#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 2c1db8036b7c..e5dea67f902e 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -5,6 +5,7 @@
 
 #include "fsm.h"
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/timer.h>
 
 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
@@ -27,6 +28,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
 		return NULL;
 	}
 	strlcpy(this->name, name, sizeof(this->name));
+	init_waitqueue_head(&this->wait_q);
 
 	f = kzalloc(sizeof(fsm), order);
 	if (f == NULL) {
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index af679c10f1bd..1e8b235d95b5 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -66,6 +66,7 @@ typedef struct fsm_instance_t {
 	char name[16];
 	void *userdata;
 	int userint;
+	wait_queue_head_t wait_q;
 #if FSM_DEBUG_HISTORY
 	int history_index;
 	int history_size;
@@ -197,6 +198,7 @@ fsm_newstate(fsm_instance *fi, int newstate)
 		printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
 			fi->f->state_names[newstate]);
 #endif
+	wake_up(&fi->wait_q);
 }
 
 /**
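The two fsm hunks above give every FSM instance a wait queue and wake it on each state transition. That is what lets ctcm_pm_suspend() (earlier in this diff) sleep until the device FSM reaches DEV_STATE_STOPPED instead of racing the offline sequence. A small illustration of the consumer side, assuming the ctcm constants; example_wait_until_stopped is hypothetical:

/* Block (bounded) until the instance reaches a target state;
 * fsm_newstate() performs the matching wake_up(&fi->wait_q). */
static int example_wait_until_stopped(fsm_instance *fi)
{
	if (!wait_event_timeout(fi->wait_q,
				fsm_getstate(fi) == DEV_STATE_STOPPED,
				5 * HZ))	/* same bound as CTCM_TIME_5_SEC */
		return -EBUSY;	/* state not reached in time */
	return 0;
}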
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a70de9b4bf29..9b19ea13b4d8 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -37,6 +37,7 @@
37#include <linux/igmp.h> 37#include <linux/igmp.h>
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/kthread.h> 39#include <linux/kthread.h>
40#include <linux/slab.h>
40#include <net/arp.h> 41#include <net/arp.h>
41#include <net/ip.h> 42#include <net/ip.h>
42 43
@@ -47,7 +48,6 @@
47#include <asm/ccwgroup.h> 48#include <asm/ccwgroup.h>
48 49
49#include "lcs.h" 50#include "lcs.h"
50#include "cu3088.h"
51 51
52 52
53#if !defined(CONFIG_NET_ETHERNET) && \ 53#if !defined(CONFIG_NET_ETHERNET) && \
@@ -60,7 +60,11 @@
60 */ 60 */
61 61
62static char version[] __initdata = "LCS driver"; 62static char version[] __initdata = "LCS driver";
63static char debug_buffer[255]; 63
64/**
65 * the root device for lcs group devices
66 */
67static struct device *lcs_root_dev;
64 68
65/** 69/**
66 * Some prototypes. 70 * Some prototypes.
@@ -76,6 +80,7 @@ static int lcs_recovery(void *ptr);
76/** 80/**
77 * Debug Facility Stuff 81 * Debug Facility Stuff
78 */ 82 */
83static char debug_buffer[255];
79static debug_info_t *lcs_dbf_setup; 84static debug_info_t *lcs_dbf_setup;
80static debug_info_t *lcs_dbf_trace; 85static debug_info_t *lcs_dbf_trace;
81 86
@@ -889,7 +894,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
889 rc = lcs_ready_buffer(&card->write, buffer); 894 rc = lcs_ready_buffer(&card->write, buffer);
890 if (rc) 895 if (rc)
891 return rc; 896 return rc;
892 init_timer(&timer); 897 init_timer_on_stack(&timer);
893 timer.function = lcs_lancmd_timeout; 898 timer.function = lcs_lancmd_timeout;
894 timer.data = (unsigned long) reply; 899 timer.data = (unsigned long) reply;
895 timer.expires = jiffies + HZ*card->lancmd_timeout; 900 timer.expires = jiffies + HZ*card->lancmd_timeout;
@@ -1968,6 +1973,15 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1968 1973
1969static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); 1974static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1970 1975
1976const char *lcs_type[] = {
1977 "not a channel",
1978 "2216 parallel",
1979 "2216 channel",
1980 "OSA LCS card",
1981 "unknown channel type",
1982 "unsupported channel type",
1983};
1984
1971static ssize_t 1985static ssize_t
1972lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) 1986lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1973{ 1987{
@@ -1977,7 +1991,7 @@ lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1977 if (!cgdev) 1991 if (!cgdev)
1978 return -ENODEV; 1992 return -ENODEV;
1979 1993
1980 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]); 1994 return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
1981} 1995}
1982 1996
1983static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); 1997static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
@@ -2130,8 +2144,12 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2130 card->write.ccwdev = ccwgdev->cdev[1]; 2144 card->write.ccwdev = ccwgdev->cdev[1];
2131 2145
2132 recover_state = card->state; 2146 recover_state = card->state;
2133 ccw_device_set_online(card->read.ccwdev); 2147 rc = ccw_device_set_online(card->read.ccwdev);
2134 ccw_device_set_online(card->write.ccwdev); 2148 if (rc)
2149 goto out_err;
2150 rc = ccw_device_set_online(card->write.ccwdev);
2151 if (rc)
2152 goto out_werr;
2135 2153
2136 LCS_DBF_TEXT(3, setup, "lcsnewdv"); 2154 LCS_DBF_TEXT(3, setup, "lcsnewdv");
2137 2155
@@ -2210,8 +2228,10 @@ netdev_out:
2210 return 0; 2228 return 0;
2211out: 2229out:
2212 2230
2213 ccw_device_set_offline(card->read.ccwdev);
2214 ccw_device_set_offline(card->write.ccwdev); 2231 ccw_device_set_offline(card->write.ccwdev);
2232out_werr:
2233 ccw_device_set_offline(card->read.ccwdev);
2234out_err:
2215 return -ENODEV; 2235 return -ENODEV;
2216} 2236}
2217 2237
@@ -2364,6 +2384,22 @@ static int lcs_restore(struct ccwgroup_device *gdev)
2364 return lcs_pm_resume(card); 2384 return lcs_pm_resume(card);
2365} 2385}
2366 2386
2387static struct ccw_device_id lcs_ids[] = {
2388 {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
2389 {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
2390 {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
2391 {},
2392};
2393MODULE_DEVICE_TABLE(ccw, lcs_ids);
2394
2395static struct ccw_driver lcs_ccw_driver = {
2396 .owner = THIS_MODULE,
2397 .name = "lcs",
2398 .ids = lcs_ids,
2399 .probe = ccwgroup_probe_ccwdev,
2400 .remove = ccwgroup_remove_ccwdev,
2401};
2402
2367/** 2403/**
2368 * LCS ccwgroup driver registration 2404 * LCS ccwgroup driver registration
2369 */ 2405 */
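
With the cu3088 layer gone, lcs matches its 0x3088 control-unit models directly and stashes the channel type in .driver_info. The ccw bus copies that value from the matching ccw_device_id into the device, which is how lcs_type_show() above can index lcs_type[]. A sketch of reading it back (function name is illustrative):

#include <asm/ccwdev.h>
#include "lcs.h"

/* Recover the channel type stored via .driver_info in lcs_ids[];
 * the ccw core copies it to cdev->id at bus-match time. */
static enum lcs_channel_types demo_channel_type(struct ccw_device *cdev)
{
	return (enum lcs_channel_types) cdev->id.driver_info;
}
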
@@ -2383,6 +2419,33 @@ static struct ccwgroup_driver lcs_group_driver = {
2383 .restore = lcs_restore, 2419 .restore = lcs_restore,
2384}; 2420};
2385 2421
2422static ssize_t
2423lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2424 size_t count)
2425{
2426 int err;
2427 err = ccwgroup_create_from_string(lcs_root_dev,
2428 lcs_group_driver.driver_id,
2429 &lcs_ccw_driver, 2, buf);
2430 return err ? err : count;
2431}
2432
2433static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
2434
2435static struct attribute *lcs_group_attrs[] = {
2436 &driver_attr_group.attr,
2437 NULL,
2438};
2439
2440static struct attribute_group lcs_group_attr_group = {
2441 .attrs = lcs_group_attrs,
2442};
2443
2444static const struct attribute_group *lcs_group_attr_groups[] = {
2445 &lcs_group_attr_group,
2446 NULL,
2447};
2448
2386/** 2449/**
2387 * LCS Module/Kernel initialization function 2450 * LCS Module/Kernel initialization function
2388 */ 2451 */
@@ -2394,17 +2457,30 @@ __init lcs_init_module(void)
2394 pr_info("Loading %s\n", version); 2457 pr_info("Loading %s\n", version);
2395 rc = lcs_register_debug_facility(); 2458 rc = lcs_register_debug_facility();
2396 LCS_DBF_TEXT(0, setup, "lcsinit"); 2459 LCS_DBF_TEXT(0, setup, "lcsinit");
2397 if (rc) { 2460 if (rc)
2398 pr_err("Initialization failed\n"); 2461 goto out_err;
2399 return rc; 2462 lcs_root_dev = root_device_register("lcs");
2400 } 2463 rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
2401 2464 if (rc)
2402 rc = register_cu3088_discipline(&lcs_group_driver); 2465 goto register_err;
2403 if (rc) { 2466 rc = ccw_driver_register(&lcs_ccw_driver);
2404 pr_err("Initialization failed\n"); 2467 if (rc)
2405 return rc; 2468 goto ccw_err;
2406 } 2469 lcs_group_driver.driver.groups = lcs_group_attr_groups;
2470 rc = ccwgroup_driver_register(&lcs_group_driver);
2471 if (rc)
2472 goto ccwgroup_err;
2407 return 0; 2473 return 0;
2474
2475ccwgroup_err:
2476 ccw_driver_unregister(&lcs_ccw_driver);
2477ccw_err:
2478 root_device_unregister(lcs_root_dev);
2479register_err:
2480 lcs_unregister_debug_facility();
2481out_err:
2482 pr_err("Initializing the lcs device driver failed\n");
2483 return rc;
2408} 2484}
2409 2485
2410 2486
@@ -2416,7 +2492,11 @@ __exit lcs_cleanup_module(void)
2416{ 2492{
2417 pr_info("Terminating lcs module.\n"); 2493 pr_info("Terminating lcs module.\n");
2418 LCS_DBF_TEXT(0, trace, "cleanup"); 2494 LCS_DBF_TEXT(0, trace, "cleanup");
2419 unregister_cu3088_discipline(&lcs_group_driver); 2495 driver_remove_file(&lcs_group_driver.driver,
2496 &driver_attr_group);
2497 ccwgroup_driver_unregister(&lcs_group_driver);
2498 ccw_driver_unregister(&lcs_ccw_driver);
2499 root_device_unregister(lcs_root_dev);
2420 lcs_unregister_debug_facility(); 2500 lcs_unregister_debug_facility();
2421} 2501}
2422 2502
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 6d668642af27..8c03392ac833 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -36,6 +36,24 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
36#define CARD_FROM_DEV(cdev) \ 36#define CARD_FROM_DEV(cdev) \
37 (struct lcs_card *) dev_get_drvdata( \ 37 (struct lcs_card *) dev_get_drvdata( \
38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); 38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
39
40/**
41 * Enum for classifying detected devices.
42 */
43enum lcs_channel_types {
44 /* Device is not a channel */
45 lcs_channel_type_none,
46
47 /* Device is a 2216 parallel channel */
48 lcs_channel_type_parallel,
49
50 /* Device is a 2216 channel */
51 lcs_channel_type_2216,
52
53 /* Device is an OSA2 card */
54 lcs_channel_type_osa2
55};
56
39/** 57/**
40 * CCW commands used in this driver 58 * CCW commands used in this driver
41 */ 59 */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index c84eadd3602a..65ebee0a3266 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
113#define IUCV_DBF_TEXT_(name, level, text...) \ 113#define IUCV_DBF_TEXT_(name, level, text...) \
114 do { \ 114 do { \
115 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ 115 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
116 char* iucv_dbf_txt_buf = \ 116 char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
117 get_cpu_var(iucv_dbf_txt_buf); \ 117 sprintf(__buf, text); \
118 sprintf(iucv_dbf_txt_buf, text); \ 118 debug_text_event(iucv_dbf_##name, level, __buf); \
119 debug_text_event(iucv_dbf_##name, level, \
120 iucv_dbf_txt_buf); \
121 put_cpu_var(iucv_dbf_txt_buf); \ 119 put_cpu_var(iucv_dbf_txt_buf); \
122 } \ 120 } \
123 } while (0) 121 } while (0)
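
The IUCV_DBF_TEXT_ rewrite is cosmetic, but the underlying pattern deserves a note: a per-CPU scratch buffer claimed with get_cpu_var(), which disables preemption so the buffer cannot be reused mid-format, and released with put_cpu_var(). Reduced to essentials (buffer name and size are illustrative):

#include <linux/percpu.h>
#include <linux/string.h>

DEFINE_PER_CPU(char[256], demo_txt_buf);

static void demo_log(const char *msg)
{
	char *buf = get_cpu_var(demo_txt_buf);	/* preemption off */

	strlcpy(buf, msg, 256);
	/* hand buf to a facility that copies it before we return */
	put_cpu_var(demo_txt_buf);		/* preemption back on */
}
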
@@ -161,7 +159,7 @@ static void netiucv_pm_complete(struct device *);
161static int netiucv_pm_freeze(struct device *); 159static int netiucv_pm_freeze(struct device *);
162static int netiucv_pm_restore_thaw(struct device *); 160static int netiucv_pm_restore_thaw(struct device *);
163 161
164static struct dev_pm_ops netiucv_pm_ops = { 162static const struct dev_pm_ops netiucv_pm_ops = {
165 .prepare = netiucv_pm_prepare, 163 .prepare = netiucv_pm_prepare,
166 .complete = netiucv_pm_complete, 164 .complete = netiucv_pm_complete,
167 .freeze = netiucv_pm_freeze, 165 .freeze = netiucv_pm_freeze,
@@ -741,13 +739,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
741 if (single_flag) { 739 if (single_flag) {
742 if ((skb = skb_dequeue(&conn->commit_queue))) { 740 if ((skb = skb_dequeue(&conn->commit_queue))) {
743 atomic_dec(&skb->users); 741 atomic_dec(&skb->users);
744 dev_kfree_skb_any(skb);
745 if (privptr) { 742 if (privptr) {
746 privptr->stats.tx_packets++; 743 privptr->stats.tx_packets++;
747 privptr->stats.tx_bytes += 744 privptr->stats.tx_bytes +=
748 (skb->len - NETIUCV_HDRLEN 745 (skb->len - NETIUCV_HDRLEN
749 - NETIUCV_HDRLEN); 746 - NETIUCV_HDRLEN);
750 } 747 }
748 dev_kfree_skb_any(skb);
751 } 749 }
752 } 750 }
753 conn->tx_buff->data = conn->tx_buff->head; 751 conn->tx_buff->data = conn->tx_buff->head;
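
The conn_action_txdone() hunk is a use-after-free fix rather than a cleanup: the old code freed the skb with dev_kfree_skb_any() and only then read skb->len for the byte counters. Schematically, the corrected ordering (the stats type and header length are stand-ins):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Read every skb field the statistics need before the skb is released. */
static void demo_account_and_free(struct sk_buff *skb,
				  struct net_device_stats *stats,
				  unsigned int hdrlen)
{
	stats->tx_packets++;
	stats->tx_bytes += skb->len - hdrlen;
	dev_kfree_skb_any(skb);	/* only now is the skb memory gone */
}
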
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 31a2b4e502ce..fcd005aad989 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -122,7 +122,6 @@ struct qeth_perf_stats {
122 __u64 outbound_do_qdio_start_time; 122 __u64 outbound_do_qdio_start_time;
123 unsigned int outbound_do_qdio_cnt; 123 unsigned int outbound_do_qdio_cnt;
124 unsigned int outbound_do_qdio_time; 124 unsigned int outbound_do_qdio_time;
125 /* eddp data */
126 unsigned int large_send_bytes; 125 unsigned int large_send_bytes;
127 unsigned int large_send_cnt; 126 unsigned int large_send_cnt;
128 unsigned int sg_skbs_sent; 127 unsigned int sg_skbs_sent;
@@ -135,6 +134,7 @@ struct qeth_perf_stats {
135 unsigned int sg_frags_rx; 134 unsigned int sg_frags_rx;
136 unsigned int sg_alloc_page_rx; 135 unsigned int sg_alloc_page_rx;
137 unsigned int tx_csum; 136 unsigned int tx_csum;
137 unsigned int tx_lin;
138}; 138};
139 139
140/* Routing stuff */ 140/* Routing stuff */
@@ -648,6 +648,8 @@ struct qeth_card_options {
648 enum qeth_large_send_types large_send; 648 enum qeth_large_send_types large_send;
649 int performance_stats; 649 int performance_stats;
650 int rx_sg_cb; 650 int rx_sg_cb;
651 enum qeth_ipa_isolation_modes isolation;
652 int sniffer;
651}; 653};
652 654
653/* 655/*
@@ -736,6 +738,7 @@ struct qeth_card {
736 struct qeth_discipline discipline; 738 struct qeth_discipline discipline;
737 atomic_t force_alloc_skb; 739 atomic_t force_alloc_skb;
738 struct service_level qeth_service_level; 740 struct service_level qeth_service_level;
741 struct qdio_ssqd_desc ssqd;
739}; 742};
740 743
741struct qeth_card_list_struct { 744struct qeth_card_list_struct {
@@ -760,7 +763,8 @@ static inline int qeth_get_micros(void)
760 763
761static inline int qeth_get_ip_version(struct sk_buff *skb) 764static inline int qeth_get_ip_version(struct sk_buff *skb)
762{ 765{
763 switch (skb->protocol) { 766 struct ethhdr *ehdr = (struct ethhdr *)skb->data;
767 switch (ehdr->h_proto) {
764 case ETH_P_IPV6: 768 case ETH_P_IPV6:
765 return 6; 769 return 6;
766 case ETH_P_IP: 770 case ETH_P_IP:
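
qeth_get_ip_version() now classifies by the Ethernet header embedded in the frame data instead of skb->protocol. A portable restatement, assuming the frame starts with an Ethernet header; qeth itself can omit the byte-order conversion because s390 is big-endian, so the raw __be16 h_proto compares directly against the host-order ETH_P_* constants:

#include <asm/byteorder.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int demo_ip_version(struct sk_buff *skb)
{
	struct ethhdr *ehdr = (struct ethhdr *)skb->data;

	switch (be16_to_cpu(ehdr->h_proto)) {
	case ETH_P_IPV6:
		return 6;
	case ETH_P_IP:
		return 4;
	default:
		return 0;
	}
}
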
@@ -776,7 +780,6 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
776 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); 780 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
777} 781}
778 782
779struct qeth_eddp_context;
780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 783extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 784extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
782const char *qeth_get_cardname_short(struct qeth_card *); 785const char *qeth_get_cardname_short(struct qeth_card *);
@@ -811,7 +814,8 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
811struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, 814struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
812 enum qeth_ipa_cmds, enum qeth_prot_versions); 815 enum qeth_ipa_cmds, enum qeth_prot_versions);
813int qeth_query_setadapterparms(struct qeth_card *); 816int qeth_query_setadapterparms(struct qeth_card *);
814int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *); 817int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
818 unsigned int, const char *);
815void qeth_queue_input_buffer(struct qeth_card *, int); 819void qeth_queue_input_buffer(struct qeth_card *, int);
816struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, 820struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
817 struct qdio_buffer *, struct qdio_buffer_element **, int *, 821 struct qdio_buffer *, struct qdio_buffer_element **, int *,
@@ -836,7 +840,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); 840struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
837int qeth_mdio_read(struct net_device *, int, int); 841int qeth_mdio_read(struct net_device *, int, int);
838int qeth_snmp_command(struct qeth_card *, char __user *); 842int qeth_snmp_command(struct qeth_card *, char __user *);
839int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
840struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32); 843struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
841int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *, 844int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
842 unsigned long); 845 unsigned long);
@@ -849,13 +852,14 @@ int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
849 struct sk_buff *, struct qeth_hdr *, int, int, int); 852 struct sk_buff *, struct qeth_hdr *, int, int, int);
850int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, 853int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
851 struct sk_buff *, struct qeth_hdr *, int); 854 struct sk_buff *, struct qeth_hdr *, int);
852int qeth_core_get_stats_count(struct net_device *); 855int qeth_core_get_sset_count(struct net_device *, int);
853void qeth_core_get_ethtool_stats(struct net_device *, 856void qeth_core_get_ethtool_stats(struct net_device *,
854 struct ethtool_stats *, u64 *); 857 struct ethtool_stats *, u64 *);
855void qeth_core_get_strings(struct net_device *, u32, u8 *); 858void qeth_core_get_strings(struct net_device *, u32, u8 *);
856void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); 859void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
857void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); 860void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
858int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); 861int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
862int qeth_set_access_ctrl_online(struct qeth_card *card);
859 863
860/* exports for OSN */ 864/* exports for OSN */
861int qeth_osn_assist(struct net_device *, void *, int); 865int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c4a42d970158..3ba738b2e271 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -20,6 +20,7 @@
20#include <linux/tcp.h> 20#include <linux/tcp.h>
21#include <linux/mii.h> 21#include <linux/mii.h>
22#include <linux/kthread.h> 22#include <linux/kthread.h>
23#include <linux/slab.h>
23 24
24#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
25#include <asm/io.h> 26#include <asm/io.h>
@@ -269,41 +270,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
269 card->qdio.init_pool.buf_count = bufcnt; 270 card->qdio.init_pool.buf_count = bufcnt;
270 return qeth_alloc_buffer_pool(card); 271 return qeth_alloc_buffer_pool(card);
271} 272}
272 273EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
273int qeth_set_large_send(struct qeth_card *card,
274 enum qeth_large_send_types type)
275{
276 int rc = 0;
277
278 if (card->dev == NULL) {
279 card->options.large_send = type;
280 return 0;
281 }
282 if (card->state == CARD_STATE_UP)
283 netif_tx_disable(card->dev);
284 card->options.large_send = type;
285 switch (card->options.large_send) {
286 case QETH_LARGE_SEND_TSO:
287 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
288 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
289 NETIF_F_HW_CSUM;
290 } else {
291 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
292 NETIF_F_HW_CSUM);
293 card->options.large_send = QETH_LARGE_SEND_NO;
294 rc = -EOPNOTSUPP;
295 }
296 break;
297 default: /* includes QETH_LARGE_SEND_NO */
298 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
299 NETIF_F_HW_CSUM);
300 break;
301 }
302 if (card->state == CARD_STATE_UP)
303 netif_wake_queue(card->dev);
304 return rc;
305}
306EXPORT_SYMBOL_GPL(qeth_set_large_send);
307 274
308static int qeth_issue_next_read(struct qeth_card *card) 275static int qeth_issue_next_read(struct qeth_card *card)
309{ 276{
@@ -385,8 +352,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
385 if (IS_IPA(iob->data)) { 352 if (IS_IPA(iob->data)) {
386 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); 353 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
387 if (IS_IPA_REPLY(cmd)) { 354 if (IS_IPA_REPLY(cmd)) {
388 if (cmd->hdr.command < IPA_CMD_SETCCID || 355 if (cmd->hdr.command != IPA_CMD_SETCCID &&
389 cmd->hdr.command > IPA_CMD_MODCCID) 356 cmd->hdr.command != IPA_CMD_DELCCID &&
357 cmd->hdr.command != IPA_CMD_MODCCID &&
358 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
390 qeth_issue_ipa_msg(cmd, 359 qeth_issue_ipa_msg(cmd,
391 cmd->hdr.return_code, card); 360 cmd->hdr.return_code, card);
392 return cmd; 361 return cmd;
@@ -569,7 +538,8 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
569 dev_err(&card->gdev->dev, 538 dev_err(&card->gdev->dev,
570 "The qeth device is not configured " 539 "The qeth device is not configured "
571 "for the OSI layer required by z/VM\n"); 540 "for the OSI layer required by z/VM\n");
572 qeth_schedule_recovery(card); 541 else
542 qeth_schedule_recovery(card);
573 goto out; 543 goto out;
574 } 544 }
575 545
@@ -1079,6 +1049,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
1079 card->options.add_hhlen = DEFAULT_ADD_HHLEN; 1049 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1080 card->options.performance_stats = 0; 1050 card->options.performance_stats = 0;
1081 card->options.rx_sg_cb = QETH_RX_SG_CB; 1051 card->options.rx_sg_cb = QETH_RX_SG_CB;
1052 card->options.isolation = ISOLATION_MODE_NONE;
1082} 1053}
1083 1054
1084static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1055static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -1134,11 +1105,6 @@ static int qeth_setup_card(struct qeth_card *card)
1134 card->thread_running_mask = 0; 1105 card->thread_running_mask = 0;
1135 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); 1106 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1136 INIT_LIST_HEAD(&card->ip_list); 1107 INIT_LIST_HEAD(&card->ip_list);
1137 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1138 if (!card->ip_tbd_list) {
1139 QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
1140 return -ENOMEM;
1141 }
1142 INIT_LIST_HEAD(card->ip_tbd_list); 1108 INIT_LIST_HEAD(card->ip_tbd_list);
1143 INIT_LIST_HEAD(&card->cmd_waiter_list); 1109 INIT_LIST_HEAD(&card->cmd_waiter_list);
1144 init_waitqueue_head(&card->wait_q); 1110 init_waitqueue_head(&card->wait_q);
@@ -1149,8 +1115,6 @@ static int qeth_setup_card(struct qeth_card *card)
1149 card->ipato.enabled = 0; 1115 card->ipato.enabled = 0;
1150 card->ipato.invert4 = 0; 1116 card->ipato.invert4 = 0;
1151 card->ipato.invert6 = 0; 1117 card->ipato.invert6 = 0;
1152 if (card->info.type == QETH_CARD_TYPE_IQD)
1153 card->options.checksum_type = NO_CHECKSUMMING;
1154 /* init QDIO stuff */ 1118 /* init QDIO stuff */
1155 qeth_init_qdio_info(card); 1119 qeth_init_qdio_info(card);
1156 return 0; 1120 return 0;
@@ -1172,21 +1136,30 @@ static struct qeth_card *qeth_alloc_card(void)
1172 QETH_DBF_TEXT(SETUP, 2, "alloccrd"); 1136 QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1173 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); 1137 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
1174 if (!card) 1138 if (!card)
1175 return NULL; 1139 goto out;
1176 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1140 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1177 if (qeth_setup_channel(&card->read)) { 1141 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1178 kfree(card); 1142 if (!card->ip_tbd_list) {
1179 return NULL; 1143 QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
1180 } 1144 goto out_card;
1181 if (qeth_setup_channel(&card->write)) {
1182 qeth_clean_channel(&card->read);
1183 kfree(card);
1184 return NULL;
1185 } 1145 }
1146 if (qeth_setup_channel(&card->read))
1147 goto out_ip;
1148 if (qeth_setup_channel(&card->write))
1149 goto out_channel;
1186 card->options.layer2 = -1; 1150 card->options.layer2 = -1;
1187 card->qeth_service_level.seq_print = qeth_core_sl_print; 1151 card->qeth_service_level.seq_print = qeth_core_sl_print;
1188 register_service_level(&card->qeth_service_level); 1152 register_service_level(&card->qeth_service_level);
1189 return card; 1153 return card;
1154
1155out_channel:
1156 qeth_clean_channel(&card->read);
1157out_ip:
1158 kfree(card->ip_tbd_list);
1159out_card:
1160 kfree(card);
1161out:
1162 return NULL;
1190} 1163}
1191 1164
1192static int qeth_determine_card_type(struct qeth_card *card) 1165static int qeth_determine_card_type(struct qeth_card *card)
@@ -1389,26 +1362,29 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1389 return ret; 1362 return ret;
1390} 1363}
1391 1364
1392static int qeth_get_unitaddr(struct qeth_card *card) 1365static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
1393{ 1366{
1394 int length; 1367 QETH_DBF_TEXT(SETUP, 2, "cfgunit");
1395 char *prcd;
1396 int rc;
1397
1398 QETH_DBF_TEXT(SETUP, 2, "getunit");
1399 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
1400 if (rc) {
1401 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
1402 dev_name(&card->gdev->dev), rc);
1403 return rc;
1404 }
1405 card->info.chpid = prcd[30]; 1368 card->info.chpid = prcd[30];
1406 card->info.unit_addr2 = prcd[31]; 1369 card->info.unit_addr2 = prcd[31];
1407 card->info.cula = prcd[63]; 1370 card->info.cula = prcd[63];
1408 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) && 1371 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1409 (prcd[0x11] == _ascebc['M'])); 1372 (prcd[0x11] == _ascebc['M']));
1410 kfree(prcd); 1373}
1411 return 0; 1374
1375static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1376{
1377 QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1378
1379 if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
1380 card->info.blkt.time_total = 250;
1381 card->info.blkt.inter_packet = 5;
1382 card->info.blkt.inter_packet_jumbo = 15;
1383 } else {
1384 card->info.blkt.time_total = 0;
1385 card->info.blkt.inter_packet = 0;
1386 card->info.blkt.inter_packet_jumbo = 0;
1387 }
1412} 1388}
1413 1389
1414static void qeth_init_tokens(struct qeth_card *card) 1390static void qeth_init_tokens(struct qeth_card *card)
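
qeth_get_unitaddr() is split so that one read-configuration-data buffer feeds two consumers: qeth_configure_unitaddr() pulls the CHPID/unit-address bytes and detects a z/VM guest LAN from the EBCDIC "VM" signature, while qeth_configure_blkt_default() keys the faster 250/5/15 blkt defaults off three identification bytes. A hedged restatement of that check (0xF0 0xF0 0xF5 is EBCDIC "005", presumably a device-model signature for the newer adapter generation):

#include <linux/types.h>

static bool demo_wants_fast_blkt(const u8 *prcd)
{
	/* EBCDIC "005"; assumed to identify the adapter generation
	 * that gets the 250/5/15 blkt defaults above */
	return prcd[74] == 0xf0 && prcd[75] == 0xf0 && prcd[76] == 0xf5;
}
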
@@ -2607,8 +2583,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
2607} 2583}
2608EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); 2584EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2609 2585
2610int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, 2586int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2611 const char *dbftext) 2587 unsigned int qdio_error, const char *dbftext)
2612{ 2588{
2613 if (qdio_error) { 2589 if (qdio_error) {
2614 QETH_DBF_TEXT(TRACE, 2, dbftext); 2590 QETH_DBF_TEXT(TRACE, 2, dbftext);
@@ -2618,7 +2594,11 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2618 QETH_DBF_TEXT_(QERR, 2, " F14=%02X", 2594 QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
2619 buf->element[14].flags & 0xff); 2595 buf->element[14].flags & 0xff);
2620 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); 2596 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
2621 return 1; 2597 if ((buf->element[15].flags & 0xff) == 0x12) {
2598 card->stats.rx_dropped++;
2599 return 0;
2600 } else
2601 return 1;
2622 } 2602 }
2623 return 0; 2603 return 0;
2624} 2604}
@@ -2701,7 +2681,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
2701 qdio_err = 1; 2681 qdio_err = 1;
2702 } 2682 }
2703 } 2683 }
2704 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr"); 2684 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
2705 2685
2706 if (!qdio_err) 2686 if (!qdio_err)
2707 return QETH_SEND_ERROR_NONE; 2687 return QETH_SEND_ERROR_NONE;
@@ -3389,10 +3369,161 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3389} 3369}
3390EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 3370EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3391 3371
3372static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
3373 struct qeth_reply *reply, unsigned long data)
3374{
3375 struct qeth_ipa_cmd *cmd;
3376 struct qeth_set_access_ctrl *access_ctrl_req;
3377 int rc;
3378
3379 QETH_DBF_TEXT(TRACE, 4, "setaccb");
3380
3381 cmd = (struct qeth_ipa_cmd *) data;
3382 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3383 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
3384 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3385 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
3386 cmd->data.setadapterparms.hdr.return_code);
3387 switch (cmd->data.setadapterparms.hdr.return_code) {
3388 case SET_ACCESS_CTRL_RC_SUCCESS:
3389 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
3390 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
3391 {
3392 card->options.isolation = access_ctrl_req->subcmd_code;
3393 if (card->options.isolation == ISOLATION_MODE_NONE) {
3394 dev_info(&card->gdev->dev,
3395 "QDIO data connection isolation is deactivated\n");
3396 } else {
3397 dev_info(&card->gdev->dev,
3398 "QDIO data connection isolation is activated\n");
3399 }
3400 QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
3401 card->gdev->dev.kobj.name,
3402 access_ctrl_req->subcmd_code,
3403 cmd->data.setadapterparms.hdr.return_code);
3404 rc = 0;
3405 break;
3406 }
3407 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
3408 {
3409 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
3410 card->gdev->dev.kobj.name,
3411 access_ctrl_req->subcmd_code,
3412 cmd->data.setadapterparms.hdr.return_code);
3413 dev_err(&card->gdev->dev, "Adapter does not "
3414 "support QDIO data connection isolation\n");
3415
3416 /* ensure isolation mode is "none" */
3417 card->options.isolation = ISOLATION_MODE_NONE;
3418 rc = -EOPNOTSUPP;
3419 break;
3420 }
3421 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
3422 {
3423 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3424 card->gdev->dev.kobj.name,
3425 access_ctrl_req->subcmd_code,
3426 cmd->data.setadapterparms.hdr.return_code);
3427 dev_err(&card->gdev->dev,
3428 "Adapter is dedicated. "
3429 "QDIO data connection isolation not supported\n");
3430
3431 /* ensure isolation mode is "none" */
3432 card->options.isolation = ISOLATION_MODE_NONE;
3433 rc = -EOPNOTSUPP;
3434 break;
3435 }
3436 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
3437 {
3438 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3439 card->gdev->dev.kobj.name,
3440 access_ctrl_req->subcmd_code,
3441 cmd->data.setadapterparms.hdr.return_code);
3442 dev_err(&card->gdev->dev,
3443 "TSO does not permit QDIO data connection isolation\n");
3444
3445 /* ensure isolation mode is "none" */
3446 card->options.isolation = ISOLATION_MODE_NONE;
3447 rc = -EPERM;
3448 break;
3449 }
3450 default:
3451 {
3452 /* this should never happen */
3453 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
3454 "==UNKNOWN\n",
3455 card->gdev->dev.kobj.name,
3456 access_ctrl_req->subcmd_code,
3457 cmd->data.setadapterparms.hdr.return_code);
3458
3459 /* ensure isolation mode is "none" */
3460 card->options.isolation = ISOLATION_MODE_NONE;
3461 rc = 0;
3462 break;
3463 }
3464 }
3465 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3466 return rc;
3467}
3468
3469static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
3470 enum qeth_ipa_isolation_modes isolation)
3471{
3472 int rc;
3473 struct qeth_cmd_buffer *iob;
3474 struct qeth_ipa_cmd *cmd;
3475 struct qeth_set_access_ctrl *access_ctrl_req;
3476
3477 QETH_DBF_TEXT(TRACE, 4, "setacctl");
3478
3479 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
3480 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3481
3482 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
3483 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
3484 sizeof(struct qeth_set_access_ctrl));
3485 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3486 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3487 access_ctrl_req->subcmd_code = isolation;
3488
3489 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
3490 NULL);
3491 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
3492 return rc;
3493}
3494
3495int qeth_set_access_ctrl_online(struct qeth_card *card)
3496{
3497 int rc = 0;
3498
3499 QETH_DBF_TEXT(TRACE, 4, "setactlo");
3500
3501 if (card->info.type == QETH_CARD_TYPE_OSAE &&
3502 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
3503 rc = qeth_setadpparms_set_access_ctrl(card,
3504 card->options.isolation);
3505 if (rc) {
3506 QETH_DBF_MESSAGE(3,
3507 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
3508 card->gdev->dev.kobj.name,
3509 rc);
3510 }
3511 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
3512 card->options.isolation = ISOLATION_MODE_NONE;
3513
3514 dev_err(&card->gdev->dev, "Adapter does not "
3515 "support QDIO data connection isolation\n");
3516 rc = -EOPNOTSUPP;
3517 }
3518 return rc;
3519}
3520EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
3521
3392void qeth_tx_timeout(struct net_device *dev) 3522void qeth_tx_timeout(struct net_device *dev)
3393{ 3523{
3394 struct qeth_card *card; 3524 struct qeth_card *card;
3395 3525
3526 QETH_DBF_TEXT(TRACE, 4, "txtimeo");
3396 card = dev->ml_priv; 3527 card = dev->ml_priv;
3397 card->stats.tx_errors++; 3528 card->stats.tx_errors++;
3398 qeth_schedule_recovery(card); 3529 qeth_schedule_recovery(card);
@@ -3674,9 +3805,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
3674 init_data.input_handler = card->discipline.input_handler; 3805 init_data.input_handler = card->discipline.input_handler;
3675 init_data.output_handler = card->discipline.output_handler; 3806 init_data.output_handler = card->discipline.output_handler;
3676 init_data.int_parm = (unsigned long) card; 3807 init_data.int_parm = (unsigned long) card;
3677 init_data.flags = QDIO_INBOUND_0COPY_SBALS |
3678 QDIO_OUTBOUND_0COPY_SBALS |
3679 QDIO_USE_OUTBOUND_PCIS;
3680 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; 3808 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3681 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; 3809 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
3682 3810
@@ -3731,60 +3859,39 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3731 3859
3732int qeth_core_hardsetup_card(struct qeth_card *card) 3860int qeth_core_hardsetup_card(struct qeth_card *card)
3733{ 3861{
3734 struct qdio_ssqd_desc *ssqd; 3862 int retries = 0;
3735 int retries = 3;
3736 int mpno = 0;
3737 int rc; 3863 int rc;
3738 3864
3739 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3865 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3740 atomic_set(&card->force_alloc_skb, 0); 3866 atomic_set(&card->force_alloc_skb, 0);
3741retry: 3867retry:
3742 if (retries < 3) { 3868 if (retries)
3743 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 3869 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
3744 dev_name(&card->gdev->dev)); 3870 dev_name(&card->gdev->dev));
3745 ccw_device_set_offline(CARD_DDEV(card)); 3871 ccw_device_set_offline(CARD_DDEV(card));
3746 ccw_device_set_offline(CARD_WDEV(card)); 3872 ccw_device_set_offline(CARD_WDEV(card));
3747 ccw_device_set_offline(CARD_RDEV(card)); 3873 ccw_device_set_offline(CARD_RDEV(card));
3748 ccw_device_set_online(CARD_RDEV(card)); 3874 rc = ccw_device_set_online(CARD_RDEV(card));
3749 ccw_device_set_online(CARD_WDEV(card)); 3875 if (rc)
3750 ccw_device_set_online(CARD_DDEV(card)); 3876 goto retriable;
3751 } 3877 rc = ccw_device_set_online(CARD_WDEV(card));
3878 if (rc)
3879 goto retriable;
3880 rc = ccw_device_set_online(CARD_DDEV(card));
3881 if (rc)
3882 goto retriable;
3752 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 3883 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
3884retriable:
3753 if (rc == -ERESTARTSYS) { 3885 if (rc == -ERESTARTSYS) {
3754 QETH_DBF_TEXT(SETUP, 2, "break1"); 3886 QETH_DBF_TEXT(SETUP, 2, "break1");
3755 return rc; 3887 return rc;
3756 } else if (rc) { 3888 } else if (rc) {
3757 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3889 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3758 if (--retries < 0) 3890 if (++retries > 3)
3759 goto out; 3891 goto out;
3760 else 3892 else
3761 goto retry; 3893 goto retry;
3762 } 3894 }
3763
3764 rc = qeth_get_unitaddr(card);
3765 if (rc) {
3766 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3767 return rc;
3768 }
3769
3770 ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL);
3771 if (!ssqd) {
3772 rc = -ENOMEM;
3773 goto out;
3774 }
3775 rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd);
3776 if (rc == 0)
3777 mpno = ssqd->pcnt;
3778 kfree(ssqd);
3779
3780 if (mpno)
3781 mpno = min(mpno - 1, QETH_MAX_PORTNO);
3782 if (card->info.portno > mpno) {
3783 QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d"
3784 "\n.", CARD_BUS_ID(card), card->info.portno);
3785 rc = -ENODEV;
3786 goto out;
3787 }
3788 qeth_init_tokens(card); 3895 qeth_init_tokens(card);
3789 qeth_init_func_level(card); 3896 qeth_init_func_level(card);
3790 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); 3897 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
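
qeth_core_hardsetup_card() now owns the online transitions (the discipline set_online paths below drop their own ccw_device_set_online() calls), so every failure funnels into one bounded retry loop: force all three channels offline, bring them back online, and redo the clear/IDX setup at most three times, bailing out immediately on -ERESTARTSYS. The skeleton of that control flow, with demo_* placeholders standing in for the ccw_device_set_offline/online calls and qeth_qdio_clear_card():

#include <linux/errno.h>

static void demo_all_channels_offline(void) { }
static int demo_all_channels_online(void) { return 0; }
static int demo_clear_card(void) { return -EIO; }

static int demo_hardsetup(void)
{
	int retries = 0;
	int rc;

retry:
	demo_all_channels_offline();	/* harmless if already offline */
	rc = demo_all_channels_online();
	if (rc)
		goto retriable;
	rc = demo_clear_card();
retriable:
	if (rc == -ERESTARTSYS)
		return rc;		/* interrupted by a signal: give up */
	if (rc) {
		if (++retries > 3)
			return rc;
		goto retry;
	}
	return 0;
}
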
@@ -3868,7 +3975,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
3868 struct qdio_buffer_element *element = *__element; 3975 struct qdio_buffer_element *element = *__element;
3869 int offset = *__offset; 3976 int offset = *__offset;
3870 struct sk_buff *skb = NULL; 3977 struct sk_buff *skb = NULL;
3871 int skb_len; 3978 int skb_len = 0;
3872 void *data_ptr; 3979 void *data_ptr;
3873 int data_len; 3980 int data_len;
3874 int headroom = 0; 3981 int headroom = 0;
@@ -3887,20 +3994,24 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
3887 *hdr = element->addr + offset; 3994 *hdr = element->addr + offset;
3888 3995
3889 offset += sizeof(struct qeth_hdr); 3996 offset += sizeof(struct qeth_hdr);
3890 if (card->options.layer2) { 3997 switch ((*hdr)->hdr.l2.id) {
3891 if (card->info.type == QETH_CARD_TYPE_OSN) { 3998 case QETH_HEADER_TYPE_LAYER2:
3892 skb_len = (*hdr)->hdr.osn.pdu_length; 3999 skb_len = (*hdr)->hdr.l2.pkt_length;
3893 headroom = sizeof(struct qeth_hdr); 4000 break;
3894 } else { 4001 case QETH_HEADER_TYPE_LAYER3:
3895 skb_len = (*hdr)->hdr.l2.pkt_length;
3896 }
3897 } else {
3898 skb_len = (*hdr)->hdr.l3.length; 4002 skb_len = (*hdr)->hdr.l3.length;
3899 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 4003 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
3900 (card->info.link_type == QETH_LINK_TYPE_HSTR)) 4004 (card->info.link_type == QETH_LINK_TYPE_HSTR))
3901 headroom = TR_HLEN; 4005 headroom = TR_HLEN;
3902 else 4006 else
3903 headroom = ETH_HLEN; 4007 headroom = ETH_HLEN;
4008 break;
4009 case QETH_HEADER_TYPE_OSN:
4010 skb_len = (*hdr)->hdr.osn.pdu_length;
4011 headroom = sizeof(struct qeth_hdr);
4012 break;
4013 default:
4014 break;
3904 } 4015 }
3905 4016
3906 if (!skb_len) 4017 if (!skb_len)
@@ -4055,6 +4166,41 @@ void qeth_core_free_discipline(struct qeth_card *card)
4055 card->discipline.ccwgdriver = NULL; 4166 card->discipline.ccwgdriver = NULL;
4056} 4167}
4057 4168
4169static void qeth_determine_capabilities(struct qeth_card *card)
4170{
4171 int rc;
4172 int length;
4173 char *prcd;
4174
4175 QETH_DBF_TEXT(SETUP, 2, "detcapab");
4176 rc = ccw_device_set_online(CARD_DDEV(card));
4177 if (rc) {
4178 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4179 goto out;
4180 }
4181
4182
4183 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4184 if (rc) {
4185 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
4186 dev_name(&card->gdev->dev), rc);
4187 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4188 goto out_offline;
4189 }
4190 qeth_configure_unitaddr(card, prcd);
4191 qeth_configure_blkt_default(card, prcd);
4192 kfree(prcd);
4193
4194 rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
4195 if (rc)
4196 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4197
4198out_offline:
4199 ccw_device_set_offline(CARD_DDEV(card));
4200out:
4201 return;
4202}
4203
4058static int qeth_core_probe_device(struct ccwgroup_device *gdev) 4204static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4059{ 4205{
4060 struct qeth_card *card; 4206 struct qeth_card *card;
@@ -4120,6 +4266,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4120 write_lock_irqsave(&qeth_core_card_list.rwlock, flags); 4266 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4121 list_add_tail(&card->list, &qeth_core_card_list.list); 4267 list_add_tail(&card->list, &qeth_core_card_list.list);
4122 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); 4268 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4269
4270 qeth_determine_capabilities(card);
4123 return 0; 4271 return 0;
4124 4272
4125err_card: 4273err_card:
@@ -4303,13 +4451,19 @@ static struct {
4303 {"tx do_QDIO time"}, 4451 {"tx do_QDIO time"},
4304 {"tx do_QDIO count"}, 4452 {"tx do_QDIO count"},
4305 {"tx csum"}, 4453 {"tx csum"},
4454 {"tx lin"},
4306}; 4455};
4307 4456
4308int qeth_core_get_stats_count(struct net_device *dev) 4457int qeth_core_get_sset_count(struct net_device *dev, int stringset)
4309{ 4458{
4310 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); 4459 switch (stringset) {
4460 case ETH_SS_STATS:
4461 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4462 default:
4463 return -EINVAL;
4464 }
4311} 4465}
4312EXPORT_SYMBOL_GPL(qeth_core_get_stats_count); 4466EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
4313 4467
4314void qeth_core_get_ethtool_stats(struct net_device *dev, 4468void qeth_core_get_ethtool_stats(struct net_device *dev,
4315 struct ethtool_stats *stats, u64 *data) 4469 struct ethtool_stats *stats, u64 *data)
@@ -4355,6 +4509,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
4355 data[31] = card->perf_stats.outbound_do_qdio_time; 4509 data[31] = card->perf_stats.outbound_do_qdio_time;
4356 data[32] = card->perf_stats.outbound_do_qdio_cnt; 4510 data[32] = card->perf_stats.outbound_do_qdio_cnt;
4357 data[33] = card->perf_stats.tx_csum; 4511 data[33] = card->perf_stats.tx_csum;
4512 data[34] = card->perf_stats.tx_lin;
4358} 4513}
4359EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); 4514EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4360 4515
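
.get_stats_count was dropped from ethtool_ops in favour of .get_sset_count, which serves all string-set size queries through a single callback; returning -EINVAL for unknown sets keeps things like ETH_SS_TEST properly reported as unsupported. Minimal wiring (the count matches the 35-entry key table above once "tx lin" is included):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

enum { DEMO_N_STATS = 35 };	/* number of u64 stats exported */

static int demo_get_sset_count(struct net_device *dev, int stringset)
{
	switch (stringset) {
	case ETH_SS_STATS:
		return DEMO_N_STATS;
	default:
		return -EINVAL;
	}
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_sset_count = demo_get_sset_count,
	/* .get_strings and .get_ethtool_stats must agree on the count */
};
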
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index eecb2ee62e85..104a3351e02b 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -156,6 +156,8 @@ enum qeth_ipa_return_codes {
156 IPA_RC_IP_TABLE_FULL = 0x0002, 156 IPA_RC_IP_TABLE_FULL = 0x0002,
157 IPA_RC_UNKNOWN_ERROR = 0x0003, 157 IPA_RC_UNKNOWN_ERROR = 0x0003,
158 IPA_RC_UNSUPPORTED_COMMAND = 0x0004, 158 IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
159 IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
160 IPA_RC_INVALID_FORMAT = 0x0006,
159 IPA_RC_DUP_IPV6_REMOTE = 0x0008, 161 IPA_RC_DUP_IPV6_REMOTE = 0x0008,
160 IPA_RC_DUP_IPV6_HOME = 0x0010, 162 IPA_RC_DUP_IPV6_HOME = 0x0010,
161 IPA_RC_UNREGISTERED_ADDR = 0x0011, 163 IPA_RC_UNREGISTERED_ADDR = 0x0011,
@@ -196,6 +198,11 @@ enum qeth_ipa_return_codes {
196 IPA_RC_INVALID_IP_VERSION2 = 0xf001, 198 IPA_RC_INVALID_IP_VERSION2 = 0xf001,
197 IPA_RC_FFFF = 0xffff 199 IPA_RC_FFFF = 0xffff
198}; 200};
201/* for DELIP */
202#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED
203/* for SET_DIAGNOSTIC_ASSIST */
204#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
205#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
199 206
200/* IPA function flags; each flag marks availability of respective function */ 207/* IPA function flags; each flag marks availability of respective function */
201enum qeth_ipa_funcs { 208enum qeth_ipa_funcs {
@@ -234,18 +241,20 @@ enum qeth_ipa_setdelip_flags {
234 241
235/* SETADAPTER IPA Command: ****************************************************/ 242/* SETADAPTER IPA Command: ****************************************************/
236enum qeth_ipa_setadp_cmd { 243enum qeth_ipa_setadp_cmd {
237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001, 244 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L,
238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002, 245 IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L,
239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004, 246 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L,
240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008, 247 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L,
241 IPA_SETADP_SET_ADDRESSING_MODE = 0x0010, 248 IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L,
242 IPA_SETADP_SET_CONFIG_PARMS = 0x0020, 249 IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L,
243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040, 250 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L,
244 IPA_SETADP_SET_BROADCAST_MODE = 0x0080, 251 IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L,
245 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, 252 IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L,
246 IPA_SETADP_SET_SNMP_CONTROL = 0x0200, 253 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
247 IPA_SETADP_QUERY_CARD_INFO = 0x0400, 254 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
248 IPA_SETADP_SET_PROMISC_MODE = 0x0800, 255 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
256 IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L,
257 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
249}; 258};
250enum qeth_ipa_mac_ops { 259enum qeth_ipa_mac_ops {
251 CHANGE_ADDR_READ_MAC = 0, 260 CHANGE_ADDR_READ_MAC = 0,
@@ -264,6 +273,20 @@ enum qeth_ipa_promisc_modes {
264 SET_PROMISC_MODE_OFF = 0, 273 SET_PROMISC_MODE_OFF = 0,
265 SET_PROMISC_MODE_ON = 1, 274 SET_PROMISC_MODE_ON = 1,
266}; 275};
276enum qeth_ipa_isolation_modes {
277 ISOLATION_MODE_NONE = 0x00000000L,
278 ISOLATION_MODE_FWD = 0x00000001L,
279 ISOLATION_MODE_DROP = 0x00000002L,
280};
281enum qeth_ipa_set_access_mode_rc {
282 SET_ACCESS_CTRL_RC_SUCCESS = 0x0000,
283 SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004,
284 SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008,
285 SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010,
286 SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014,
287 SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018,
288};
289
267 290
268/* (SET)DELIP(M) IPA stuff ***************************************************/ 291/* (SET)DELIP(M) IPA stuff ***************************************************/
269struct qeth_ipacmd_setdelip4 { 292struct qeth_ipacmd_setdelip4 {
@@ -376,6 +399,11 @@ struct qeth_snmp_ureq {
376 struct qeth_snmp_cmd cmd; 399 struct qeth_snmp_cmd cmd;
377} __attribute__((packed)); 400} __attribute__((packed));
378 401
402/* SET_ACCESS_CONTROL: same format for request and reply */
403struct qeth_set_access_ctrl {
404 __u32 subcmd_code;
405} __attribute__((packed));
406
379struct qeth_ipacmd_setadpparms_hdr { 407struct qeth_ipacmd_setadpparms_hdr {
380 __u32 supp_hw_cmds; 408 __u32 supp_hw_cmds;
381 __u32 reserved1; 409 __u32 reserved1;
@@ -394,6 +422,7 @@ struct qeth_ipacmd_setadpparms {
394 struct qeth_query_cmds_supp query_cmds_supp; 422 struct qeth_query_cmds_supp query_cmds_supp;
395 struct qeth_change_addr change_addr; 423 struct qeth_change_addr change_addr;
396 struct qeth_snmp_cmd snmp; 424 struct qeth_snmp_cmd snmp;
425 struct qeth_set_access_ctrl set_access_ctrl;
397 __u32 mode; 426 __u32 mode;
398 } data; 427 } data;
399} __attribute__ ((packed)); 428} __attribute__ ((packed));
@@ -403,6 +432,40 @@ struct qeth_create_destroy_address {
403 __u8 unique_id[8]; 432 __u8 unique_id[8];
404} __attribute__ ((packed)); 433} __attribute__ ((packed));
405 434
435/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
436
437enum qeth_diags_cmds {
438 QETH_DIAGS_CMD_QUERY = 0x0001,
439 QETH_DIAGS_CMD_TRAP = 0x0002,
440 QETH_DIAGS_CMD_TRACE = 0x0004,
441 QETH_DIAGS_CMD_NOLOG = 0x0008,
442 QETH_DIAGS_CMD_DUMP = 0x0010,
443};
444
445enum qeth_diags_trace_types {
446 QETH_DIAGS_TYPE_HIPERSOCKET = 0x02,
447};
448
449enum qeth_diags_trace_cmds {
450 QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001,
451 QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002,
452 QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004,
453 QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008,
454 QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
455};
456
457struct qeth_ipacmd_diagass {
458 __u32 host_tod2;
459 __u32:32;
460 __u16 subcmd_len;
461 __u16:16;
462 __u32 subcmd;
463 __u8 type;
464 __u8 action;
465 __u16 options;
466 __u32:32;
467} __attribute__ ((packed));
468
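
struct qeth_ipacmd_diagass above uses unnamed bitfields (__u32:32, __u16:16) purely as padding: they reserve the wire bytes without creating an accessible member, and __attribute__((packed)) keeps the compiler from inserting padding of its own. The idiom in isolation:

#include <linux/types.h>

/* 12-byte wire format: 4 data bytes, 4 reserved, 4 data;
 * sizeof(struct demo_wire_hdr) == 12 */
struct demo_wire_hdr {
	__u32 token;
	__u32:32;	/* reserved word, no member name */
	__u32 subcmd;
} __attribute__((packed));
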
406/* Header for each IPA command */ 469/* Header for each IPA command */
407struct qeth_ipacmd_hdr { 470struct qeth_ipacmd_hdr {
408 __u8 command; 471 __u8 command;
@@ -431,6 +494,7 @@ struct qeth_ipa_cmd {
431 struct qeth_create_destroy_address create_destroy_addr; 494 struct qeth_create_destroy_address create_destroy_addr;
432 struct qeth_ipacmd_setadpparms setadapterparms; 495 struct qeth_ipacmd_setadpparms setadapterparms;
433 struct qeth_set_routing setrtg; 496 struct qeth_set_routing setrtg;
497 struct qeth_ipacmd_diagass diagass;
434 } data; 498 } data;
435} __attribute__ ((packed)); 499} __attribute__ ((packed));
436 500
@@ -448,7 +512,6 @@ enum qeth_ipa_arp_return_codes {
448 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, 512 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
449}; 513};
450 514
451
452extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); 515extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
453extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); 516extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
454 517
@@ -507,7 +570,7 @@ extern unsigned char ULP_ENABLE[];
507 (PDU_ENCAPSULATION(buffer) + 0x17) 570 (PDU_ENCAPSULATION(buffer) + 0x17)
508#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \ 571#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
509 (PDU_ENCAPSULATION(buffer) + 0x2b) 572 (PDU_ENCAPSULATION(buffer) + 0x2b)
510/* Layer 2 defintions */ 573/* Layer 2 definitions */
511#define QETH_PROT_LAYER2 0x08 574#define QETH_PROT_LAYER2 0x08
512#define QETH_PROT_TCPIP 0x03 575#define QETH_PROT_TCPIP 0x03
513#define QETH_PROT_OSN2 0x0a 576#define QETH_PROT_OSN2 0x0a
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 33505c2a0e3a..25dfd5abd19b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -8,6 +8,9 @@
8 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 * Frank Blaschka <frank.blaschka@de.ibm.com>
9 */ 9 */
10 10
11#define KMSG_COMPONENT "qeth"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
11#include <linux/list.h> 14#include <linux/list.h>
12#include <linux/rwsem.h> 15#include <linux/rwsem.h>
13#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
@@ -118,7 +121,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
118{ 121{
119 struct qeth_card *card = dev_get_drvdata(dev); 122 struct qeth_card *card = dev_get_drvdata(dev);
120 char *tmp; 123 char *tmp;
121 unsigned int portno; 124 unsigned int portno, limit;
122 125
123 if (!card) 126 if (!card)
124 return -EINVAL; 127 return -EINVAL;
@@ -128,9 +131,11 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
128 return -EPERM; 131 return -EPERM;
129 132
130 portno = simple_strtoul(buf, &tmp, 16); 133 portno = simple_strtoul(buf, &tmp, 16);
131 if (portno > QETH_MAX_PORTNO) { 134 if (portno > QETH_MAX_PORTNO)
135 return -EINVAL;
136 limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
137 if (portno > limit)
132 return -EINVAL; 138 return -EINVAL;
133 }
134 139
135 card->info.portno = portno; 140 card->info.portno = portno;
136 return count; 141 return count;
@@ -416,7 +421,11 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, 421static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
417 qeth_dev_layer2_store); 422 qeth_dev_layer2_store);
418 423
419static ssize_t qeth_dev_large_send_show(struct device *dev, 424#define ATTR_QETH_ISOLATION_NONE ("none")
425#define ATTR_QETH_ISOLATION_FWD ("forward")
426#define ATTR_QETH_ISOLATION_DROP ("drop")
427
428static ssize_t qeth_dev_isolation_show(struct device *dev,
420 struct device_attribute *attr, char *buf) 429 struct device_attribute *attr, char *buf)
421{ 430{
422 struct qeth_card *card = dev_get_drvdata(dev); 431 struct qeth_card *card = dev_get_drvdata(dev);
@@ -424,44 +433,69 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
424 if (!card) 433 if (!card)
425 return -EINVAL; 434 return -EINVAL;
426 435
427 switch (card->options.large_send) { 436 switch (card->options.isolation) {
428 case QETH_LARGE_SEND_NO: 437 case ISOLATION_MODE_NONE:
429 return sprintf(buf, "%s\n", "no"); 438 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
430 case QETH_LARGE_SEND_TSO: 439 case ISOLATION_MODE_FWD:
431 return sprintf(buf, "%s\n", "TSO"); 440 return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
441 case ISOLATION_MODE_DROP:
442 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
432 default: 443 default:
433 return sprintf(buf, "%s\n", "N/A"); 444 return snprintf(buf, 5, "%s\n", "N/A");
434 } 445 }
435} 446}
436 447
437static ssize_t qeth_dev_large_send_store(struct device *dev, 448static ssize_t qeth_dev_isolation_store(struct device *dev,
438 struct device_attribute *attr, const char *buf, size_t count) 449 struct device_attribute *attr, const char *buf, size_t count)
439{ 450{
440 struct qeth_card *card = dev_get_drvdata(dev); 451 struct qeth_card *card = dev_get_drvdata(dev);
441 enum qeth_large_send_types type; 452 enum qeth_ipa_isolation_modes isolation;
442 int rc = 0; 453 int rc = 0;
443 char *tmp; 454 char *tmp, *curtoken;
455 curtoken = (char *) buf;
444 456
445 if (!card) 457 if (!card) {
446 return -EINVAL; 458 rc = -EINVAL;
447 tmp = strsep((char **) &buf, "\n"); 459 goto out;
448 if (!strcmp(tmp, "no")) { 460 }
449 type = QETH_LARGE_SEND_NO; 461
450 } else if (!strcmp(tmp, "TSO")) { 462 /* check for unknown, too, in case we do not yet know who we are */
451 type = QETH_LARGE_SEND_TSO; 463 if (card->info.type != QETH_CARD_TYPE_OSAE &&
464 card->info.type != QETH_CARD_TYPE_UNKNOWN) {
465 rc = -EOPNOTSUPP;
466 dev_err(&card->gdev->dev, "Adapter does not "
467 "support QDIO data connection isolation\n");
468 goto out;
469 }
470
471 /* parse input into isolation mode */
472 tmp = strsep(&curtoken, "\n");
473 if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
474 isolation = ISOLATION_MODE_NONE;
475 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
476 isolation = ISOLATION_MODE_FWD;
477 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
478 isolation = ISOLATION_MODE_DROP;
452 } else { 479 } else {
453 return -EINVAL; 480 rc = -EINVAL;
481 goto out;
454 } 482 }
455 if (card->options.large_send == type) 483 rc = count;
456 return count; 484
457 rc = qeth_set_large_send(card, type); 485 /* defer IP assist if device is offline (until discipline->set_online)*/
458 if (rc) 486 card->options.isolation = isolation;
459 return rc; 487 if (card->state == CARD_STATE_SOFTSETUP ||
460 return count; 488 card->state == CARD_STATE_UP) {
489 int ipa_rc = qeth_set_access_ctrl_online(card);
490 if (ipa_rc != 0)
491 rc = ipa_rc;
492 }
493out:
494 return rc;
461} 495}
462 496
463static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show, 497static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
464 qeth_dev_large_send_store); 498 qeth_dev_isolation_store);
465 499
466static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) 500static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
467{ 501{
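
qeth_dev_isolation_store() records the requested mode even while the device is offline and issues the SET_ACCESS_CONTROL command only in SOFTSETUP/UP state; otherwise the saved option is pushed later from the discipline's set_online path via qeth_set_access_ctrl_online(). The defer-until-online pattern in brief (a sketch, not the full attribute handler):

#include "qeth_core.h"

static int demo_set_isolation(struct qeth_card *card,
			      enum qeth_ipa_isolation_modes mode)
{
	card->options.isolation = mode;	/* always record the option */
	if (card->state == CARD_STATE_SOFTSETUP ||
	    card->state == CARD_STATE_UP)
		return qeth_set_access_ctrl_online(card);
	return 0;	/* applied later during set_online */
}
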
@@ -508,7 +542,7 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
508 struct qeth_card *card = dev_get_drvdata(dev); 542 struct qeth_card *card = dev_get_drvdata(dev);
509 543
510 return qeth_dev_blkt_store(card, buf, count, 544 return qeth_dev_blkt_store(card, buf, count,
511 &card->info.blkt.time_total, 1000); 545 &card->info.blkt.time_total, 5000);
512} 546}
513 547
514 548
@@ -530,7 +564,7 @@ static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
530 struct qeth_card *card = dev_get_drvdata(dev); 564 struct qeth_card *card = dev_get_drvdata(dev);
531 565
532 return qeth_dev_blkt_store(card, buf, count, 566 return qeth_dev_blkt_store(card, buf, count,
533 &card->info.blkt.inter_packet, 100); 567 &card->info.blkt.inter_packet, 1000);
534} 568}
535 569
536static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show, 570static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
@@ -551,7 +585,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
551 struct qeth_card *card = dev_get_drvdata(dev); 585 struct qeth_card *card = dev_get_drvdata(dev);
552 586
553 return qeth_dev_blkt_store(card, buf, count, 587 return qeth_dev_blkt_store(card, buf, count,
554 &card->info.blkt.inter_packet_jumbo, 100); 588 &card->info.blkt.inter_packet_jumbo, 1000);
555} 589}
556 590
557static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show, 591static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
@@ -582,7 +616,7 @@ static struct attribute *qeth_device_attrs[] = {
582 &dev_attr_recover.attr, 616 &dev_attr_recover.attr,
583 &dev_attr_performance_stats.attr, 617 &dev_attr_performance_stats.attr,
584 &dev_attr_layer2.attr, 618 &dev_attr_layer2.attr,
585 &dev_attr_large_send.attr, 619 &dev_attr_isolation.attr,
586 NULL, 620 NULL,
587}; 621};
588 622
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f4f3ca1393b2..6a801dc3bf8e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/slab.h>
19#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
20#include <linux/mii.h> 21#include <linux/mii.h>
21#include <linux/ip.h> 22#include <linux/ip.h>
@@ -486,22 +487,14 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
486 case IPA_RC_L2_DUP_MAC: 487 case IPA_RC_L2_DUP_MAC:
487 case IPA_RC_L2_DUP_LAYER3_MAC: 488 case IPA_RC_L2_DUP_LAYER3_MAC:
488 dev_warn(&card->gdev->dev, 489 dev_warn(&card->gdev->dev,
489 "MAC address " 490 "MAC address %pM already exists\n",
490 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 491 card->dev->dev_addr);
491 "already exists\n",
492 card->dev->dev_addr[0], card->dev->dev_addr[1],
493 card->dev->dev_addr[2], card->dev->dev_addr[3],
494 card->dev->dev_addr[4], card->dev->dev_addr[5]);
495 break; 492 break;
496 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: 493 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
497 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: 494 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
498 dev_warn(&card->gdev->dev, 495 dev_warn(&card->gdev->dev,
499 "MAC address " 496 "MAC address %pM is not authorized\n",
500 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 497 card->dev->dev_addr);
501 "is not authorized\n",
502 card->dev->dev_addr[0], card->dev->dev_addr[1],
503 card->dev->dev_addr[2], card->dev->dev_addr[3],
504 card->dev->dev_addr[4], card->dev->dev_addr[5]);
505 break; 498 break;
506 default: 499 default:
507 break; 500 break;
@@ -512,12 +505,8 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
512 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, 505 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
513 OSA_ADDR_LEN); 506 OSA_ADDR_LEN);
514 dev_info(&card->gdev->dev, 507 dev_info(&card->gdev->dev,
515 "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 508 "MAC address %pM successfully registered on device %s\n",
516 "successfully registered on device %s\n", 509 card->dev->dev_addr, card->dev->name);
517 card->dev->dev_addr[0], card->dev->dev_addr[1],
518 card->dev->dev_addr[2], card->dev->dev_addr[3],
519 card->dev->dev_addr[4], card->dev->dev_addr[5],
520 card->dev->name);
521 } 510 }
522 return 0; 511 return 0;
523} 512}
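
The three message rewrites above replace hand-rolled "%2.2x:..." sequences with the %pM printk extension, which formats a six-byte MAC address from a pointer in a single specifier. For example:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void demo_print_mac(const struct net_device *dev)
{
	/* %pM takes a pointer to six bytes and prints aa:bb:cc:dd:ee:ff */
	pr_info("MAC address %pM registered on %s\n",
		dev->dev_addr, dev->name);
}
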
@@ -634,7 +623,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
 	for (dm = dev->mc_list; dm; dm = dm->next)
 		qeth_l2_add_mc(card, dm->da_addr, 0);
 
-	list_for_each_entry(ha, &dev->uc.list, list)
+	netdev_for_each_uc_addr(ha, dev)
 		qeth_l2_add_mc(card, ha->addr, 1);
 
 	spin_unlock_bh(&card->mclock);
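netdev_for_each_uc_addr() hides the layout of the secondary-unicast list (its element type is struct netdev_hw_addr), so drivers no longer reach into dev->uc.list directly. A hedged sketch of the iterator outside this driver:

    #include <linux/netdevice.h>

    static void example_walk_uc(struct net_device *dev)
    {
            struct netdev_hw_addr *ha;

            /* the caller serializes against list updates, e.g. with
             * netif_addr_lock_bh(dev) */
            netdev_for_each_uc_addr(ha, dev)
                    pr_info("secondary unicast address %pM\n", ha->addr);
    }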
@@ -781,7 +770,8 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
 		index = i % QDIO_MAX_BUFFERS_PER_Q;
 		buffer = &card->qdio.in_q->bufs[index];
 		if (!(qdio_err &&
-		      qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
+		      qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
+					     "qinerr")))
 			qeth_l2_process_inbound_buffer(card, buffer, index);
 		/* clear buffer and give back to hardware */
 		qeth_put_buffer_pool_entry(card, buffer->pool_entry);
@@ -866,7 +856,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
 	.get_link = ethtool_op_get_link,
 	.get_strings = qeth_core_get_strings,
 	.get_ethtool_stats = qeth_core_get_ethtool_stats,
-	.get_stats_count = qeth_core_get_stats_count,
+	.get_sset_count = qeth_core_get_sset_count,
 	.get_drvinfo = qeth_core_get_drvinfo,
 	.get_settings = qeth_core_ethtool_get_settings,
 };
@@ -874,7 +864,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
 static const struct ethtool_ops qeth_l2_osn_ops = {
 	.get_strings = qeth_core_get_strings,
 	.get_ethtool_stats = qeth_core_get_ethtool_stats,
-	.get_stats_count = qeth_core_get_stats_count,
+	.get_sset_count = qeth_core_get_sset_count,
 	.get_drvinfo = qeth_core_get_drvinfo,
 };
 
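These ethtool hunks track the core API change that retired get_stats_count() in favor of get_sset_count(), which receives the string-set ID and can therefore report counts for several sets. A sketch of the replacement shape (NUM_STATS is a hypothetical driver constant):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    #define NUM_STATS 8 /* hypothetical */

    static int example_get_sset_count(struct net_device *dev, int stringset)
    {
            switch (stringset) {
            case ETH_SS_STATS:
                    return NUM_STATS;
            default:
                    return -EINVAL;
            }
    }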
@@ -938,32 +928,18 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	QETH_DBF_TEXT(SETUP, 2, "setonlin");
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
-	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
 	recover_flag = card->state;
-	rc = ccw_device_set_online(CARD_RDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_WDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-
 	rc = qeth_core_hardsetup_card(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 
-	if (!card->dev && qeth_l2_setup_netdev(card))
+	if (!card->dev && qeth_l2_setup_netdev(card)) {
+		rc = -ENODEV;
 		goto out_remove;
+	}
 
 	if (card->info.type != QETH_CARD_TYPE_OSN)
 		qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
@@ -983,12 +959,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			card->lan_online = 0;
 			return 0;
 		}
+		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
 
 	if (card->info.type != QETH_CARD_TYPE_OSN) {
-		qeth_set_large_send(card, card->options.large_send);
+		/* configure isolation level */
+		qeth_set_access_ctrl_online(card);
 		qeth_l2_process_vlans(card, 0);
 	}
 
@@ -997,6 +975,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	rc = qeth_init_qdio_queues(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
@@ -1018,6 +997,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
 	return 0;
+
 out_remove:
 	card->use_hard_stop = 1;
 	qeth_l2_stop_card(card, 0);
@@ -1028,7 +1008,7 @@ out_remove:
 		card->state = CARD_STATE_RECOVER;
 	else
 		card->state = CARD_STATE_DOWN;
-	return -ENODEV;
+	return rc;
 }
 
 static int qeth_l2_set_online(struct ccwgroup_device *gdev)
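Taken together, the __qeth_l2_set_online() hunks move the ccw_device_set_online() calls into qeth_core_hardsetup_card() and switch the error path from a hardcoded -ENODEV to whatever errno was recorded at the failure site. A hedged sketch of that unwind convention, with hypothetical helpers:

    #include <linux/errno.h>

    struct example_card;
    int example_hardsetup(struct example_card *card);   /* hypothetical */
    void example_stop(struct example_card *card);       /* hypothetical */

    static int example_set_online(struct example_card *card)
    {
            int rc;

            rc = example_hardsetup(card);
            if (rc) {
                    rc = -ENODEV;       /* record errno at the failure site */
                    goto out_remove;
            }
            return 0;

    out_remove:
            example_stop(card);
            return rc;                  /* propagate the recorded errno */
    }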
@@ -1092,11 +1072,9 @@ static int qeth_l2_recover(void *ptr)
 		dev_info(&card->gdev->dev,
 			 "Device successfully recovered!\n");
 	else {
-		if (card->dev) {
-			rtnl_lock();
-			dev_close(card->dev);
-			rtnl_unlock();
-		}
+		rtnl_lock();
+		dev_close(card->dev);
+		rtnl_unlock();
 		dev_warn(&card->gdev->dev, "The qeth device driver "
 			 "failed to recover an error on the device\n");
 	}
@@ -1150,11 +1128,9 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
 	if (card->state == CARD_STATE_RECOVER) {
 		rc = __qeth_l2_set_online(card->gdev, 1);
 		if (rc) {
-			if (card->dev) {
-				rtnl_lock();
-				dev_close(card->dev);
-				rtnl_unlock();
-			}
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
 		}
 	} else
 		rc = __qeth_l2_set_online(card->gdev, 0);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 9f143c83bba3..8447d233d0b3 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,6 +13,8 @@
 
 #include "qeth_core.h"
 
+#define QETH_SNIFF_AVAIL	0x0008
+
 struct qeth_ipaddr {
 	struct list_head entry;
 	enum qeth_ip_types type;
@@ -60,5 +62,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
 			const u8 *);
+int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
+int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
 
 #endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 073b6d354915..fc6ca1da8b98 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -22,6 +22,7 @@
 #include <linux/ipv6.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
+#include <linux/slab.h>
 
 #include <net/ip.h>
 #include <net/arp.h>
@@ -41,6 +42,32 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
 static int __qeth_l3_set_online(struct ccwgroup_device *, int);
 static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
 
+int qeth_l3_set_large_send(struct qeth_card *card,
+		enum qeth_large_send_types type)
+{
+	int rc = 0;
+
+	card->options.large_send = type;
+	if (card->dev == NULL)
+		return 0;
+
+	if (card->options.large_send == QETH_LARGE_SEND_TSO) {
+		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+					       NETIF_F_HW_CSUM;
+		} else {
+			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+						 NETIF_F_HW_CSUM);
+			card->options.large_send = QETH_LARGE_SEND_NO;
+			rc = -EOPNOTSUPP;
+		}
+	} else {
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+					 NETIF_F_HW_CSUM);
+		card->options.large_send = QETH_LARGE_SEND_NO;
+	}
+	return rc;
+}
 
 static int qeth_l3_isxdigit(char *buf)
 {
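qeth_l3_set_large_send() flips NETIF_F_TSO together with NETIF_F_SG and NETIF_F_HW_CSUM because the stack only hands TSO-sized skbs to devices that also advertise scatter/gather and checksum offload. A hedged sketch of that bundling:

    #include <linux/netdevice.h>
    #include <linux/types.h>

    static void example_toggle_tso(struct net_device *dev, bool hw_has_tso)
    {
            const unsigned long tso_bundle = NETIF_F_TSO | NETIF_F_SG |
                                             NETIF_F_HW_CSUM;

            if (hw_has_tso)
                    dev->features |= tso_bundle;    /* enable as a bundle */
            else
                    dev->features &= ~tso_bundle;   /* drop the whole bundle */
    }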
@@ -216,6 +243,8 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
 	struct qeth_ipaddr *tmp, *t;
 	int found = 0;
 
+	if (card->options.sniffer)
+		return 0;
 	list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
 		if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
 		    (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
@@ -431,6 +460,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
 	QETH_DBF_TEXT(TRACE, 2, "sdiplist");
 	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
 
+	if (card->options.sniffer)
+		return;
 	spin_lock_irqsave(&card->ip_lock, flags);
 	tbd_list = card->ip_tbd_list;
 	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -469,7 +500,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
 		spin_unlock_irqrestore(&card->ip_lock, flags);
 		rc = qeth_l3_deregister_addr_entry(card, addr);
 		spin_lock_irqsave(&card->ip_lock, flags);
-		if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED))
+		if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
 			kfree(addr);
 		else
 			list_add_tail(&addr->entry, &card->ip_list);
@@ -487,6 +518,8 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
 	unsigned long flags;
 
 	QETH_DBF_TEXT(TRACE, 4, "clearip");
+	if (recover && card->options.sniffer)
+		return;
 	spin_lock_irqsave(&card->ip_lock, flags);
 	/* clear todo list */
 	list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
@@ -1439,6 +1472,35 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
 		return 0;
 }
 
+int qeth_l3_set_rx_csum(struct qeth_card *card,
+	enum qeth_checksum_types csum_type)
+{
+	int rc = 0;
+
+	if (card->options.checksum_type == HW_CHECKSUMMING) {
+		if ((csum_type != HW_CHECKSUMMING) &&
+		    (card->state != CARD_STATE_DOWN)) {
+			rc = qeth_l3_send_simple_setassparms(card,
+				IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
+			if (rc)
+				return -EIO;
+		}
+	} else {
+		if (csum_type == HW_CHECKSUMMING) {
+			if (card->state != CARD_STATE_DOWN) {
+				if (!qeth_is_supported(card,
+				    IPA_INBOUND_CHECKSUM))
+					return -EPERM;
+				rc = qeth_l3_send_checksum_command(card);
+				if (rc)
+					return -EIO;
+			}
+		}
+	}
+	card->options.checksum_type = csum_type;
+	return rc;
+}
+
 static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
 {
 	int rc = 0;
@@ -1506,6 +1568,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
 static int qeth_l3_start_ipassists(struct qeth_card *card)
 {
 	QETH_DBF_TEXT(TRACE, 3, "strtipas");
+
+	qeth_set_access_ctrl_online(card);	/* go on*/
 	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
 	qeth_l3_start_ipa_ip_fragmentation(card);	/* go on*/
 	qeth_l3_start_ipa_source_mac(card);	/* go on*/
@@ -1617,6 +1681,80 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
 	return rc;
 }
 
+static int
+qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	__u16 rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "diastrcb");
+
+	cmd = (struct qeth_ipa_cmd *)data;
+	rc = cmd->hdr.return_code;
+	if (rc)
+		QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
+	switch (cmd->data.diagass.action) {
+	case QETH_DIAGS_CMD_TRACE_QUERY:
+		break;
+	case QETH_DIAGS_CMD_TRACE_DISABLE:
+		switch (rc) {
+		case 0:
+		case IPA_RC_INVALID_SUBCMD:
+			card->info.promisc_mode = SET_PROMISC_MODE_OFF;
+			dev_info(&card->gdev->dev, "The HiperSockets network "
+				"traffic analyzer is deactivated\n");
+			break;
+		default:
+			break;
+		}
+		break;
+	case QETH_DIAGS_CMD_TRACE_ENABLE:
+		switch (rc) {
+		case 0:
+			card->info.promisc_mode = SET_PROMISC_MODE_ON;
+			dev_info(&card->gdev->dev, "The HiperSockets network "
+				"traffic analyzer is activated\n");
+			break;
+		case IPA_RC_HARDWARE_AUTH_ERROR:
+			dev_warn(&card->gdev->dev, "The device is not "
+				"authorized to run as a HiperSockets network "
+				"traffic analyzer\n");
+			break;
+		case IPA_RC_TRACE_ALREADY_ACTIVE:
+			dev_warn(&card->gdev->dev, "A HiperSockets "
+				"network traffic analyzer is already "
+				"active in the HiperSockets LAN\n");
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
+			cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+	}
+
+	return 0;
+}
+
+static int
+qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "diagtrac");
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.diagass.subcmd_len = 16;
+	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
+	cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
+	cmd->data.diagass.action = diags_cmd;
+	return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
+}
+
 static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
 				struct net_device *dev)
 {
@@ -1894,7 +2032,10 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
 	case QETH_CAST_ANYCAST:
 	case QETH_CAST_NOCAST:
 	default:
-		skb->pkt_type = PACKET_HOST;
+		if (card->options.sniffer)
+			skb->pkt_type = PACKET_OTHERHOST;
+		else
+			skb->pkt_type = PACKET_HOST;
 		memcpy(tg_addr, card->dev->dev_addr,
 			card->dev->addr_len);
 	}
@@ -1950,7 +2091,6 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
 	int offset;
 	__u16 vlan_tag = 0;
 	unsigned int len;
-
 	/* get first element of current buffer */
 	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
 	offset = 0;
@@ -1969,7 +2109,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
 	case QETH_HEADER_TYPE_LAYER3:
 		vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
 		len = skb->len;
-		if (vlan_tag)
+		if (vlan_tag && !card->options.sniffer)
 			if (card->vlangrp)
 				vlan_hwaccel_rx(skb, card->vlangrp,
 					vlan_tag);
@@ -1980,6 +2120,16 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
 		else
 			netif_rx(skb);
 		break;
+	case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
+		skb->pkt_type = PACKET_HOST;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		if (card->options.checksum_type == NO_CHECKSUMMING)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb->ip_summed = CHECKSUM_NONE;
+		len = skb->len;
+		netif_receive_skb(skb);
+		break;
 	default:
 		dev_kfree_skb_any(skb);
 		QETH_DBF_TEXT(TRACE, 3, "inbunkno");
@@ -2061,17 +2211,18 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
 	qeth_set_allowed_threads(card, 0, 1);
+	if (card->options.sniffer &&
+	    (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+		qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
 	if (card->read.state == CH_STATE_UP &&
 	    card->write.state == CH_STATE_UP &&
 	    (card->state == CARD_STATE_UP)) {
 		if (recovery_mode)
 			qeth_l3_stop(card->dev);
 		else {
-			if (card->dev) {
-				rtnl_lock();
-				dev_close(card->dev);
-				rtnl_unlock();
-			}
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
 		}
 		if (!card->use_hard_stop) {
 			rc = qeth_send_stoplan(card);
@@ -2105,6 +2256,36 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
 	return rc;
 }
 
+/*
+ * test for and Switch promiscuous mode (on or off)
+ * either for guestlan or HiperSocket Sniffer
+ */
+static void
+qeth_l3_handle_promisc_mode(struct qeth_card *card)
+{
+	struct net_device *dev = card->dev;
+
+	if (((dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
+	    (!(dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+		return;
+
+	if (card->info.guestlan) {		/* Guestlan trace */
+		if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+			qeth_setadp_promisc_mode(card);
+	} else if (card->options.sniffer &&	/* HiperSockets trace */
+		   qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+		if (dev->flags & IFF_PROMISC) {
+			QETH_DBF_TEXT(TRACE, 3, "+promisc");
+			qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
+		} else {
+			QETH_DBF_TEXT(TRACE, 3, "-promisc");
+			qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+		}
+	}
+}
+
 static void qeth_l3_set_multicast_list(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -2113,15 +2294,17 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
 	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
 	    (card->state != CARD_STATE_UP))
 		return;
-	qeth_l3_delete_mc_addresses(card);
-	qeth_l3_add_multicast_ipv4(card);
+	if (!card->options.sniffer) {
+		qeth_l3_delete_mc_addresses(card);
+		qeth_l3_add_multicast_ipv4(card);
 #ifdef CONFIG_QETH_IPV6
-	qeth_l3_add_multicast_ipv6(card);
+		qeth_l3_add_multicast_ipv6(card);
 #endif
-	qeth_l3_set_ip_addr_list(card);
-	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
-		return;
-	qeth_setadp_promisc_mode(card);
+		qeth_l3_set_ip_addr_list(card);
+		if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+			return;
+	}
+	qeth_l3_handle_promisc_mode(card);
 }
 
 static const char *qeth_l3_arp_get_error_cause(int *rc)
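qeth_l3_handle_promisc_mode() keys the trace facility off the interface's IFF_PROMISC flag, so an ordinary promiscuous-mode toggle from user space (tcpdump does this implicitly) starts or stops the HiperSockets analyzer once the sniffer attribute is set. A hedged user-space sketch of such a toggle, built with a normal userland toolchain:

    #include <net/if.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int set_promisc(const char *ifname, int on)
    {
            struct ifreq ifr;
            int rc = -1;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {
                    if (on)
                            ifr.ifr_flags |= IFF_PROMISC;
                    else
                            ifr.ifr_flags &= ~IFF_PROMISC;
                    rc = ioctl(fd, SIOCSIFFLAGS, &ifr);
            }
            close(fd);
            return rc;
    }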
@@ -2684,6 +2867,24 @@ static void qeth_tx_csum(struct sk_buff *skb)
 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
 }
 
+static inline int qeth_l3_tso_elements(struct sk_buff *skb)
+{
+	unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
+		tcp_hdr(skb)->doff * 4;
+	int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
+	int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
+	elements += skb_shinfo(skb)->nr_frags;
+	return elements;
+}
+
+static inline int qeth_l3_tso_check(struct sk_buff *skb)
+{
+	int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
+		(unsigned long)skb->data;
+	return (((unsigned long)skb->data & PAGE_MASK) !=
+		(((unsigned long)skb->data + len) & PAGE_MASK));
+}
+
 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	int rc;
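qeth_l3_tso_elements() counts the QDIO buffer elements a TSO skb will need: the linear TCP payload contributes one element per 4 KiB page it touches (PFN_UP(end) - PFN_DOWN(start)), plus one element per page-sized fragment. A small user-space check of the page arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
            /* A 5000-byte payload starting 100 bytes before the end of
             * page 0 runs through pages 0, 1 and 2: three elements. */
            unsigned long start = 0x1000 - 100;
            unsigned long end = start + 5000;

            printf("elements = %lu\n", PFN_UP(end) - PFN_DOWN(start));
            return 0;
    }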
@@ -2702,9 +2903,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int data_offset = -1;
 	int nr_frags;
 
-	if ((card->info.type == QETH_CARD_TYPE_IQD) &&
-	    (skb->protocol != htons(ETH_P_IPV6)) &&
-	    (skb->protocol != htons(ETH_P_IP)))
+	if (((card->info.type == QETH_CARD_TYPE_IQD) && (!ipv)) ||
+	    card->options.sniffer)
 		goto tx_drop;
 
 	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -2750,14 +2950,14 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (data_offset < 0)
 			skb_pull(new_skb, ETH_HLEN);
 	} else {
-		if (new_skb->protocol == htons(ETH_P_IP)) {
+		if (ipv == 4) {
 			if (card->dev->type == ARPHRD_IEEE802_TR)
 				skb_pull(new_skb, TR_HLEN);
 			else
 				skb_pull(new_skb, ETH_HLEN);
 		}
 
-		if (new_skb->protocol == ETH_P_IPV6 && card->vlangrp &&
+		if (ipv == 6 && card->vlangrp &&
 		    vlan_tx_tag_present(new_skb)) {
 			skb_push(new_skb, VLAN_HLEN);
 			skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
@@ -2777,16 +2977,21 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* fix hardware limitation: as long as we do not have sbal
 	 * chaining we can not send long frag lists
 	 */
-	if ((large_send == QETH_LARGE_SEND_TSO) &&
-	    ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) {
-		if (skb_linearize(new_skb))
-			goto tx_drop;
+	if (large_send == QETH_LARGE_SEND_TSO) {
+		if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
+			if (skb_linearize(new_skb))
+				goto tx_drop;
+			if (card->options.performance_stats)
+				card->perf_stats.tx_lin++;
+		}
 	}
 
 	if ((large_send == QETH_LARGE_SEND_TSO) &&
 	    (cast_type == RTN_UNSPEC)) {
 		hdr = (struct qeth_hdr *)skb_push(new_skb,
 						sizeof(struct qeth_hdr_tso));
+		if (qeth_l3_tso_check(new_skb))
+			QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
 		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
 		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
 		qeth_tso_fill_header(card, hdr, new_skb);
@@ -2903,46 +3108,28 @@ static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
 static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
 {
 	struct qeth_card *card = dev->ml_priv;
-	enum qeth_card_states old_state;
 	enum qeth_checksum_types csum_type;
 
-	if ((card->state != CARD_STATE_UP) &&
-	    (card->state != CARD_STATE_DOWN))
-		return -EPERM;
-
 	if (data)
 		csum_type = HW_CHECKSUMMING;
 	else
 		csum_type = SW_CHECKSUMMING;
 
-	if (card->options.checksum_type != csum_type) {
-		old_state = card->state;
-		if (card->state == CARD_STATE_UP)
-			__qeth_l3_set_offline(card->gdev, 1);
-		card->options.checksum_type = csum_type;
-		if (old_state == CARD_STATE_UP)
-			__qeth_l3_set_online(card->gdev, 1);
-	}
-	return 0;
+	return qeth_l3_set_rx_csum(card, csum_type);
 }
 
 static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
 {
 	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
 
 	if (data) {
-		if (card->options.large_send == QETH_LARGE_SEND_NO) {
-			if (card->info.type == QETH_CARD_TYPE_IQD)
-				return -EPERM;
-			else
-				card->options.large_send = QETH_LARGE_SEND_TSO;
-			dev->features |= NETIF_F_TSO;
-		}
+		rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
 	} else {
 		dev->features &= ~NETIF_F_TSO;
 		card->options.large_send = QETH_LARGE_SEND_NO;
 	}
-	return 0;
+	return rc;
 }
 
 static const struct ethtool_ops qeth_l3_ethtool_ops = {
@@ -2957,7 +3144,7 @@ static const struct ethtool_ops qeth_l3_ethtool_ops = {
 	.set_tso = qeth_l3_ethtool_set_tso,
 	.get_strings = qeth_core_get_strings,
 	.get_ethtool_stats = qeth_core_get_ethtool_stats,
-	.get_stats_count = qeth_core_get_stats_count,
+	.get_sset_count = qeth_core_get_sset_count,
 	.get_drvinfo = qeth_core_get_drvinfo,
 	.get_settings = qeth_core_ethtool_get_settings,
 };
@@ -3058,6 +3245,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 				NETIF_F_HW_VLAN_RX |
 				NETIF_F_HW_VLAN_FILTER;
 	card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+	card->dev->gso_max_size = 15 * PAGE_SIZE;
 
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	return register_netdev(card->dev);
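Capping gso_max_size at 15 * PAGE_SIZE (15 x 4096 = 61440 bytes with 4 KiB pages) ties the netdev setup to the transmit-path check above: a maximal GSO skb then fits the 16-element QDIO limit as 15 page-sized data elements plus one element for the qeth TSO header.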
@@ -3092,7 +3280,7 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
 		index = i % QDIO_MAX_BUFFERS_PER_Q;
 		buffer = &card->qdio.in_q->bufs[index];
 		if (!(qdio_err &&
-		      qeth_check_qdio_errors(buffer->buffer,
-		      qdio_err, "qinerr")))
+		      qeth_check_qdio_errors(card, buffer->buffer,
+		      qdio_err, "qinerr")))
 			qeth_l3_process_inbound_buffer(card, buffer, index);
 		/* clear buffer and give back to hardware */
@@ -3151,35 +3339,20 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	QETH_DBF_TEXT(SETUP, 2, "setonlin");
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
-	qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
-
 	recover_flag = card->state;
-	rc = ccw_device_set_online(CARD_RDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_WDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-		return -EIO;
-	}
-
 	rc = qeth_core_hardsetup_card(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 
 	qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
 
-	if (!card->dev && qeth_l3_setup_netdev(card))
+	if (!card->dev && qeth_l3_setup_netdev(card)) {
+		rc = -ENODEV;
 		goto out_remove;
+	}
 
 	card->state = CARD_STATE_HARDSETUP;
 	qeth_print_status_message(card);
@@ -3196,28 +3369,32 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			card->lan_online = 0;
 			return 0;
 		}
+		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
-	qeth_set_large_send(card, card->options.large_send);
 
 	rc = qeth_l3_setadapter_parms(card);
 	if (rc)
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
-	rc = qeth_l3_start_ipassists(card);
-	if (rc)
-		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
-	rc = qeth_l3_setrouting_v4(card);
-	if (rc)
-		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
-	rc = qeth_l3_setrouting_v6(card);
-	if (rc)
-		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+	if (!card->options.sniffer) {
+		rc = qeth_l3_start_ipassists(card);
+		if (rc)
+			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+		qeth_l3_set_large_send(card, card->options.large_send);
+		rc = qeth_l3_setrouting_v4(card);
+		if (rc)
+			QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+		rc = qeth_l3_setrouting_v6(card);
+		if (rc)
+			QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+	}
 	netif_tx_disable(card->dev);
 
 	rc = qeth_init_qdio_queues(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
@@ -3248,7 +3425,7 @@ out_remove:
 		card->state = CARD_STATE_RECOVER;
 	else
 		card->state = CARD_STATE_DOWN;
-	return -ENODEV;
+	return rc;
 }
 
 static int qeth_l3_set_online(struct ccwgroup_device *gdev)
@@ -3358,11 +3535,9 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
 	if (card->state == CARD_STATE_RECOVER) {
 		rc = __qeth_l3_set_online(card->gdev, 1);
 		if (rc) {
-			if (card->dev) {
-				rtnl_lock();
-				dev_close(card->dev);
-				rtnl_unlock();
-			}
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
 		}
 	} else
 		rc = __qeth_l3_set_online(card->gdev, 0);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index c144b9924d52..25b3e7aae44f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -8,6 +8,8 @@
  *		 Frank Blaschka <frank.blaschka@de.ibm.com>
  */
 
+#include <linux/slab.h>
+
 #include "qeth_l3.h"
 
 #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
@@ -293,31 +295,134 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_checksum_types csum_type;
 	char *tmp;
+	int rc;
 
 	if (!card)
 		return -EINVAL;
 
-	if ((card->state != CARD_STATE_DOWN) &&
-	    (card->state != CARD_STATE_RECOVER))
-		return -EPERM;
-
 	tmp = strsep((char **) &buf, "\n");
 	if (!strcmp(tmp, "sw_checksumming"))
-		card->options.checksum_type = SW_CHECKSUMMING;
+		csum_type = SW_CHECKSUMMING;
 	else if (!strcmp(tmp, "hw_checksumming"))
-		card->options.checksum_type = HW_CHECKSUMMING;
+		csum_type = HW_CHECKSUMMING;
 	else if (!strcmp(tmp, "no_checksumming"))
-		card->options.checksum_type = NO_CHECKSUMMING;
-	else {
+		csum_type = NO_CHECKSUMMING;
+	else
 		return -EINVAL;
-	}
+
+	rc = qeth_l3_set_rx_csum(card, csum_type);
+	if (rc)
+		return rc;
 	return count;
 }
 
 static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
 		   qeth_l3_dev_checksum_store);
 
+static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int ret;
+	unsigned long i;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER))
+		return -EPERM;
+
+	ret = strict_strtoul(buf, 16, &i);
+	if (ret)
+		return -EINVAL;
+	switch (i) {
+	case 0:
+		card->options.sniffer = i;
+		break;
+	case 1:
+		ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+		if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+			card->options.sniffer = i;
+			if (card->qdio.init_pool.buf_count !=
+					QETH_IN_BUF_COUNT_MAX)
+				qeth_realloc_buffer_pool(card,
+					QETH_IN_BUF_COUNT_MAX);
+			break;
+		} else
+			return -EPERM;
+	default:	/* fall through */
+		return -EINVAL;
+	}
+	return count;
+}
+
+static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
+		qeth_l3_dev_sniffer_store);
+
+static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	switch (card->options.large_send) {
+	case QETH_LARGE_SEND_NO:
+		return sprintf(buf, "%s\n", "no");
+	case QETH_LARGE_SEND_TSO:
+		return sprintf(buf, "%s\n", "TSO");
+	default:
+		return sprintf(buf, "%s\n", "N/A");
+	}
+}
+
+static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_large_send_types type;
+	int rc = 0;
+	char *tmp;
+
+	if (!card)
+		return -EINVAL;
+	tmp = strsep((char **) &buf, "\n");
+	if (!strcmp(tmp, "no"))
+		type = QETH_LARGE_SEND_NO;
+	else if (!strcmp(tmp, "TSO"))
+		type = QETH_LARGE_SEND_TSO;
+	else
+		return -EINVAL;
+
+	if (card->options.large_send == type)
+		return count;
+	rc = qeth_l3_set_large_send(card, type);
+	if (rc)
+		return rc;
+	return count;
+}
+
+static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
+		   qeth_l3_dev_large_send_store);
+
 static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_route4.attr,
 	&dev_attr_route6.attr,
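The sniffer attribute only accepts changes while the IQD device is offline, and enabling it further requires the QETH_SNIFF_AVAIL bit in the queried SSQD plus a buffer pool grown to QETH_IN_BUF_COUNT_MAX; note that strict_strtoul() is invoked with base 16. A hedged sketch of the parse-and-gate pattern such store routines follow (the helper is hypothetical):

    #include <linux/errno.h>
    #include <linux/kernel.h>

    /* hypothetical helper: parse a 0/1 flag and apply it if permitted */
    static ssize_t example_flag_store(const char *buf, size_t count,
                                      int *flag, int hw_allows_it)
    {
            unsigned long val;

            if (strict_strtoul(buf, 16, &val))
                    return -EINVAL;
            if (val > 1)
                    return -EINVAL;
            if (val && !hw_allows_it)
                    return -EPERM;  /* feature not advertised by hardware */
            *flag = val;
            return count;
    }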
@@ -325,6 +430,8 @@ static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_broadcast_mode.attr,
 	&dev_attr_canonical_macaddr.attr,
 	&dev_attr_checksumming.attr,
+	&dev_attr_sniffer.attr,
+	&dev_attr_large_send.attr,
 	NULL,
 };
 
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3012355f8304..70491274da16 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/device.h>
+#include <linux/slab.h>
 #include <net/iucv/iucv.h>
 #include <asm/cpcmd.h>
 #include <asm/ebcdic.h>
@@ -31,9 +32,9 @@
 
 struct smsg_callback {
 	struct list_head list;
-	char *prefix;
+	const char *prefix;
 	int len;
-	void (*callback)(char *from, char *str);
+	void (*callback)(const char *from, char *str);
 };
 
 MODULE_AUTHOR
@@ -100,8 +101,8 @@ static void smsg_message_pending(struct iucv_path *path,
 	kfree(buffer);
 }
 
-int smsg_register_callback(char *prefix,
-			   void (*callback)(char *from, char *str))
+int smsg_register_callback(const char *prefix,
+			   void (*callback)(const char *from, char *str))
 {
 	struct smsg_callback *cb;
 
@@ -117,8 +118,9 @@ int smsg_register_callback(char *prefix,
 	return 0;
 }
 
-void smsg_unregister_callback(char *prefix,
-			      void (*callback)(char *from, char *str))
+void smsg_unregister_callback(const char *prefix,
+			      void (*callback)(const char *from,
+					       char *str))
 {
 	struct smsg_callback *cb, *tmp;
 
@@ -168,7 +170,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
 	return 0;
 }
 
-static struct dev_pm_ops smsg_pm_ops = {
+static const struct dev_pm_ops smsg_pm_ops = {
 	.freeze = smsg_pm_freeze,
 	.thaw = smsg_pm_restore_thaw,
 	.restore = smsg_pm_restore_thaw,
@@ -176,7 +178,7 @@ static struct dev_pm_ops smsg_pm_ops = {
 
 static struct device_driver smsg_driver = {
 	.owner = THIS_MODULE,
-	.name = "SMSGIUCV",
+	.name = SMSGIUCV_DRV_NAME,
 	.bus = &iucv_bus,
 	.pm = &smsg_pm_ops,
 };
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
index 67f5d4f8378d..149a1151608d 100644
--- a/drivers/s390/net/smsgiucv.h
+++ b/drivers/s390/net/smsgiucv.h
@@ -5,6 +5,10 @@
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
 
-int smsg_register_callback(char *, void (*)(char *, char *));
-void smsg_unregister_callback(char *, void (*)(char *, char *));
+#define SMSGIUCV_DRV_NAME	"SMSGIUCV"
+
+int smsg_register_callback(const char *,
+			   void (*)(const char *, char *));
+void smsg_unregister_callback(const char *,
+			      void (*)(const char *, char *));
 
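With the prototypes now const-qualified, a consumer registers a prefix-matched handler as in this hedged sketch (the "TEST" prefix and handler are illustrative, not from the source):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include "smsgiucv.h"

    static void test_smsg_handler(const char *from, char *str)
    {
            pr_info("SMSG from %s: %s\n", from, str);
    }

    static int __init test_smsg_init(void)
    {
            /* invoked for CP special messages starting with "TEST" */
            return smsg_register_callback("TEST", test_smsg_handler);
    }

    static void __exit test_smsg_exit(void)
    {
            smsg_unregister_callback("TEST", test_smsg_handler);
    }

    module_init(test_smsg_init);
    module_exit(test_smsg_exit);
    MODULE_LICENSE("GPL");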
diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c
new file mode 100644
index 000000000000..137688790207
--- /dev/null
+++ b/drivers/s390/net/smsgiucv_app.c
@@ -0,0 +1,212 @@
+/*
+ * Deliver z/VM CP special messages (SMSG) as uevents.
+ *
+ * The driver registers for z/VM CP special messages with the
+ * "APP" prefix. Incoming messages are delivered to user space
+ * as uevents.
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *
+ */
+#define KMSG_COMPONENT		"smsgiucv_app"
+#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
+
+#include <linux/ctype.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <net/iucv/iucv.h>
+#include "smsgiucv.h"
+
+/* prefix used for SMSG registration */
+#define SMSG_PREFIX		"APP"
+
+/* SMSG related uevent environment variables */
+#define ENV_SENDER_STR		"SMSG_SENDER="
+#define ENV_SENDER_LEN		(strlen(ENV_SENDER_STR) + 8 + 1)
+#define ENV_PREFIX_STR		"SMSG_ID="
+#define ENV_PREFIX_LEN		(strlen(ENV_PREFIX_STR) + \
+				 strlen(SMSG_PREFIX) + 1)
+#define ENV_TEXT_STR		"SMSG_TEXT="
+#define ENV_TEXT_LEN(msg)	(strlen(ENV_TEXT_STR) + strlen((msg)) + 1)
+
+/* z/VM user ID which is permitted to send SMSGs
+ * If the value is undefined or empty (""), special messages are
+ * accepted from any z/VM user ID. */
+static char *sender;
+module_param(sender, charp, 0400);
+MODULE_PARM_DESC(sender, "z/VM user ID from which CP SMSGs are accepted");
+
+/* SMSG device representation */
+static struct device *smsg_app_dev;
+
+/* list element for queuing received messages for delivery */
+struct smsg_app_event {
+	struct list_head list;
+	char *buf;
+	char *envp[4];
+};
+
+/* queue for outgoing uevents */
+static LIST_HEAD(smsg_event_queue);
+static DEFINE_SPINLOCK(smsg_event_queue_lock);
+
+static void smsg_app_event_free(struct smsg_app_event *ev)
+{
+	kfree(ev->buf);
+	kfree(ev);
+}
+
+static struct smsg_app_event *smsg_app_event_alloc(const char *from,
+						   const char *msg)
+{
+	struct smsg_app_event *ev;
+
+	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+	if (!ev)
+		return NULL;
+
+	ev->buf = kzalloc(ENV_SENDER_LEN + ENV_PREFIX_LEN +
+			  ENV_TEXT_LEN(msg), GFP_ATOMIC);
+	if (!ev->buf) {
+		kfree(ev);
+		return NULL;
+	}
+
+	/* setting up environment pointers into buf */
+	ev->envp[0] = ev->buf;
+	ev->envp[1] = ev->envp[0] + ENV_SENDER_LEN;
+	ev->envp[2] = ev->envp[1] + ENV_PREFIX_LEN;
+	ev->envp[3] = NULL;
+
+	/* setting up environment: sender, prefix name, and message text */
+	snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from);
+	snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX);
+	snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg);
+
+	return ev;
+}
+
+static void smsg_event_work_fn(struct work_struct *work)
+{
+	LIST_HEAD(event_queue);
+	struct smsg_app_event *p, *n;
+	struct device *dev;
+
+	dev = get_device(smsg_app_dev);
+	if (!dev)
+		return;
+
+	spin_lock_bh(&smsg_event_queue_lock);
+	list_splice_init(&smsg_event_queue, &event_queue);
+	spin_unlock_bh(&smsg_event_queue_lock);
+
+	list_for_each_entry_safe(p, n, &event_queue, list) {
+		list_del(&p->list);
+		kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp);
+		smsg_app_event_free(p);
+	}
+
+	put_device(dev);
+}
+static DECLARE_WORK(smsg_event_work, smsg_event_work_fn);
+
+static void smsg_app_callback(const char *from, char *msg)
+{
+	struct smsg_app_event *se;
+
+	/* check if the originating z/VM user ID matches
+	 * the configured sender. */
+	if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0)
+		return;
+
+	/* get start of message text (skip prefix and leading blanks) */
+	msg += strlen(SMSG_PREFIX);
+	while (*msg && isspace(*msg))
+		msg++;
+	if (*msg == '\0')
+		return;
+
+	/* allocate event list element and its environment */
+	se = smsg_app_event_alloc(from, msg);
+	if (!se)
+		return;
+
+	/* queue event and schedule work function */
+	spin_lock(&smsg_event_queue_lock);
+	list_add_tail(&se->list, &smsg_event_queue);
+	spin_unlock(&smsg_event_queue_lock);
+
+	schedule_work(&smsg_event_work);
+	return;
+}
+
+static int __init smsgiucv_app_init(void)
+{
+	struct device_driver *smsgiucv_drv;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return -ENODEV;
+
+	smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL);
+	if (!smsg_app_dev)
+		return -ENOMEM;
+
+	smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus);
+	if (!smsgiucv_drv) {
+		kfree(smsg_app_dev);
+		return -ENODEV;
+	}
+
+	rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT);
+	if (rc) {
+		kfree(smsg_app_dev);
+		goto fail_put_driver;
+	}
+	smsg_app_dev->bus = &iucv_bus;
+	smsg_app_dev->parent = iucv_root;
+	smsg_app_dev->release = (void (*)(struct device *)) kfree;
+	smsg_app_dev->driver = smsgiucv_drv;
+	rc = device_register(smsg_app_dev);
+	if (rc) {
+		put_device(smsg_app_dev);
+		goto fail_put_driver;
+	}
+
+	/* register with the smsgiucv device driver */
+	rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback);
+	if (rc) {
+		device_unregister(smsg_app_dev);
+		goto fail_put_driver;
+	}
+
+	rc = 0;
+fail_put_driver:
+	put_driver(smsgiucv_drv);
+	return rc;
+}
+module_init(smsgiucv_app_init);
+
+static void __exit smsgiucv_app_exit(void)
+{
+	/* unregister callback */
+	smsg_unregister_callback(SMSG_PREFIX, smsg_app_callback);
+
+	/* cancel pending work and flush any queued event work */
+	cancel_work_sync(&smsg_event_work);
+	smsg_event_work_fn(&smsg_event_work);
+
+	device_unregister(smsg_app_dev);
+}
+module_exit(smsgiucv_app_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Deliver z/VM CP SMSG as uevents");
+MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
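Each accepted message becomes a KOBJ_CHANGE uevent on the smsgiucv_app device carrying SMSG_SENDER, SMSG_ID and SMSG_TEXT in its environment, so delivery to user space needs no new ABI beyond udev. A hypothetical rule consuming these events (the device match and helper path are illustrative, not from the source):

    # /etc/udev/rules.d/99-smsg-app.rules (hypothetical)
    ACTION=="change", KERNEL=="smsgiucv_app", ENV{SMSG_ID}=="APP", \
            RUN+="/usr/local/sbin/handle-smsg '%E{SMSG_SENDER}' '%E{SMSG_TEXT}'"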
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 2889e5f2dfd3..1e6183a86ce5 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 /*
@@ -30,7 +30,10 @@
 
 #include <linux/miscdevice.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "zfcp_ext.h"
+#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
 
 #define ZFCP_BUS_ID_SIZE	20
 
@@ -48,80 +51,42 @@ static struct kmem_cache *zfcp_cache_hw_align(const char *name,
 	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
 }
 
-static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
-{
-	int idx;
-
-	adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
-				    GFP_KERNEL);
-	if (!adapter->req_list)
-		return -ENOMEM;
-
-	for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
-		INIT_LIST_HEAD(&adapter->req_list[idx]);
-	return 0;
-}
-
-/**
- * zfcp_reqlist_isempty - is the request list empty
- * @adapter: pointer to struct zfcp_adapter
- *
- * Returns: true if list is empty, false otherwise
- */
-int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
-{
-	unsigned int idx;
-
-	for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
-		if (!list_empty(&adapter->req_list[idx]))
-			return 0;
-	return 1;
-}
-
 static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 {
-	struct ccw_device *ccwdev;
+	struct ccw_device *cdev;
 	struct zfcp_adapter *adapter;
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
 
-	ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
-	if (!ccwdev)
+	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+	if (!cdev)
 		return;
 
-	if (ccw_device_set_online(ccwdev))
-		goto out_ccwdev;
+	if (ccw_device_set_online(cdev))
+		goto out_ccw_device;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	adapter = dev_get_drvdata(&ccwdev->dev);
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
 	if (!adapter)
-		goto out_unlock;
-	zfcp_adapter_get(adapter);
+		goto out_ccw_device;
 
 	port = zfcp_get_port_by_wwpn(adapter, wwpn);
 	if (!port)
 		goto out_port;
 
-	zfcp_port_get(port);
 	unit = zfcp_unit_enqueue(port, lun);
 	if (IS_ERR(unit))
 		goto out_unit;
-	mutex_unlock(&zfcp_data.config_mutex);
 
 	zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
 	zfcp_erp_wait(adapter);
 	flush_work(&unit->scsi_work);
 
-	mutex_lock(&zfcp_data.config_mutex);
-	zfcp_unit_put(unit);
 out_unit:
-	zfcp_port_put(port);
+	put_device(&port->dev);
 out_port:
-	zfcp_adapter_put(adapter);
-out_unlock:
-	mutex_unlock(&zfcp_data.config_mutex);
-out_ccwdev:
-	put_device(&ccwdev->dev);
+	zfcp_ccw_adapter_put(adapter);
+out_ccw_device:
+	put_device(&cdev->dev);
 	return;
 }
127 92
@@ -167,7 +132,7 @@ static int __init zfcp_module_init(void)
167 int retval = -ENOMEM; 132 int retval = -ENOMEM;
168 133
169 zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", 134 zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
170 sizeof(struct ct_iu_gpn_ft_req)); 135 sizeof(struct zfcp_fc_gpn_ft_req));
171 if (!zfcp_data.gpn_ft_cache) 136 if (!zfcp_data.gpn_ft_cache)
172 goto out; 137 goto out;
173 138
@@ -182,12 +147,14 @@ static int __init zfcp_module_init(void)
 		goto out_sr_cache;
 
 	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
-					sizeof(struct zfcp_gid_pn_data));
+					sizeof(struct zfcp_fc_gid_pn));
 	if (!zfcp_data.gid_pn_cache)
 		goto out_gid_cache;
 
-	mutex_init(&zfcp_data.config_mutex);
-	rwlock_init(&zfcp_data.config_lock);
+	zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
+					sizeof(struct zfcp_fc_els_adisc));
+	if (!zfcp_data.adisc_cache)
+		goto out_adisc_cache;
 
 	zfcp_data.scsi_transport_template =
 		fc_attach_transport(&zfcp_transport_functions);
@@ -200,7 +167,7 @@ static int __init zfcp_module_init(void)
 		goto out_misc;
 	}
 
-	retval = zfcp_ccw_register();
+	retval = ccw_driver_register(&zfcp_ccw_driver);
 	if (retval) {
 		pr_err("The zfcp device driver could not register with "
 		       "the common I/O layer\n");
@@ -216,6 +183,8 @@ out_ccw_register:
 out_misc:
 	fc_release_transport(zfcp_data.scsi_transport_template);
 out_transport:
+	kmem_cache_destroy(zfcp_data.adisc_cache);
+out_adisc_cache:
 	kmem_cache_destroy(zfcp_data.gid_pn_cache);
 out_gid_cache:
 	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
@@ -229,6 +198,20 @@ out:
 
 module_init(zfcp_module_init);
 
+static void __exit zfcp_module_exit(void)
+{
+	ccw_driver_unregister(&zfcp_ccw_driver);
+	misc_deregister(&zfcp_cfdc_misc);
+	fc_release_transport(zfcp_data.scsi_transport_template);
+	kmem_cache_destroy(zfcp_data.adisc_cache);
+	kmem_cache_destroy(zfcp_data.gid_pn_cache);
+	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
+	kmem_cache_destroy(zfcp_data.qtcb_cache);
+	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
+}
+
+module_exit(zfcp_module_exit);
+
 /**
  * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
  * @port: pointer to port to search for unit
@@ -238,12 +221,18 @@ module_init(zfcp_module_init);
  */
 struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
 {
+	unsigned long flags;
 	struct zfcp_unit *unit;
 
-	list_for_each_entry(unit, &port->unit_list_head, list)
-		if ((unit->fcp_lun == fcp_lun) &&
-		    !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE))
-			return unit;
+	read_lock_irqsave(&port->unit_list_lock, flags);
+	list_for_each_entry(unit, &port->unit_list, list)
+		if (unit->fcp_lun == fcp_lun) {
+			if (!get_device(&unit->dev))
+				unit = NULL;
+			read_unlock_irqrestore(&port->unit_list_lock, flags);
+			return unit;
+		}
+	read_unlock_irqrestore(&port->unit_list_lock, flags);
 	return NULL;
 }
 
@@ -257,18 +246,34 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
257struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, 246struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
258 u64 wwpn) 247 u64 wwpn)
259{ 248{
249 unsigned long flags;
260 struct zfcp_port *port; 250 struct zfcp_port *port;
261 251
262 list_for_each_entry(port, &adapter->port_list_head, list) 252 read_lock_irqsave(&adapter->port_list_lock, flags);
263 if ((port->wwpn == wwpn) && 253 list_for_each_entry(port, &adapter->port_list, list)
264 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE)) 254 if (port->wwpn == wwpn) {
255 if (!get_device(&port->dev))
256 port = NULL;
257 read_unlock_irqrestore(&adapter->port_list_lock, flags);
265 return port; 258 return port;
259 }
260 read_unlock_irqrestore(&adapter->port_list_lock, flags);
266 return NULL; 261 return NULL;
267} 262}
268 263
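
Both lookup functions above now follow the same pattern: traverse the list under the read lock and, on a match, take a device reference via get_device() before dropping the lock, so the object cannot be freed while the caller uses it; the caller is then responsible for the matching put_device(). A reduced sketch of the pattern, with illustrative names:

/* Sketch: reference-taking lookup under a read lock.
 * demo_item/demo_list and their members are illustrative. */
#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_item {
	struct device dev;
	struct list_head list;
	u64 key;
};

struct demo_list {
	rwlock_t lock;
	struct list_head head;
};

struct demo_item *demo_lookup(struct demo_list *dl, u64 key)
{
	struct demo_item *item;
	unsigned long flags;

	read_lock_irqsave(&dl->lock, flags);
	list_for_each_entry(item, &dl->head, list)
		if (item->key == key) {
			/* get_device() fails for a device being released */
			if (!get_device(&item->dev))
				item = NULL;
			read_unlock_irqrestore(&dl->lock, flags);
			return item;	/* caller must call put_device() */
		}
	read_unlock_irqrestore(&dl->lock, flags);
	return NULL;
}
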
269static void zfcp_sysfs_unit_release(struct device *dev) 264/**
 265 * zfcp_unit_release - release and free unit
 266 * @dev: pointer to device
 267 *
 268 * Drops the reference to the associated port taken at enqueue time and
 269 * frees the unit once the driver core has dropped its last reference.
270 */
271static void zfcp_unit_release(struct device *dev)
270{ 272{
271 kfree(container_of(dev, struct zfcp_unit, sysfs_device)); 273 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
274
275 put_device(&unit->port->dev);
276 kfree(unit);
272} 277}
273 278
274/** 279/**
@@ -276,43 +281,40 @@ static void zfcp_sysfs_unit_release(struct device *dev)
276 * @port: pointer to port where unit is added 281 * @port: pointer to port where unit is added
277 * @fcp_lun: FCP LUN of unit to be enqueued 282 * @fcp_lun: FCP LUN of unit to be enqueued
278 * Returns: pointer to enqueued unit on success, ERR_PTR on error 283 * Returns: pointer to enqueued unit on success, ERR_PTR on error
279 * Locks: config_mutex must be held to serialize changes to the unit list
280 * 284 *
281 * Sets up some unit internal structures and creates sysfs entry. 285 * Sets up some unit internal structures and creates sysfs entry.
282 */ 286 */
283struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) 287struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
284{ 288{
285 struct zfcp_unit *unit; 289 struct zfcp_unit *unit;
290 int retval = -ENOMEM;
291
292 get_device(&port->dev);
286 293
287 read_lock_irq(&zfcp_data.config_lock); 294 unit = zfcp_get_unit_by_lun(port, fcp_lun);
288 if (zfcp_get_unit_by_lun(port, fcp_lun)) { 295 if (unit) {
289 read_unlock_irq(&zfcp_data.config_lock); 296 put_device(&unit->dev);
290 return ERR_PTR(-EINVAL); 297 retval = -EEXIST;
298 goto err_out;
291 } 299 }
292 read_unlock_irq(&zfcp_data.config_lock);
293 300
294 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); 301 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
295 if (!unit) 302 if (!unit)
296 return ERR_PTR(-ENOMEM); 303 goto err_out;
297
298 atomic_set(&unit->refcount, 0);
299 init_waitqueue_head(&unit->remove_wq);
300 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
301 304
302 unit->port = port; 305 unit->port = port;
303 unit->fcp_lun = fcp_lun; 306 unit->fcp_lun = fcp_lun;
307 unit->dev.parent = &port->dev;
308 unit->dev.release = zfcp_unit_release;
304 309
305 if (dev_set_name(&unit->sysfs_device, "0x%016llx", 310 if (dev_set_name(&unit->dev, "0x%016llx",
306 (unsigned long long) fcp_lun)) { 311 (unsigned long long) fcp_lun)) {
307 kfree(unit); 312 kfree(unit);
308 return ERR_PTR(-ENOMEM); 313 goto err_out;
309 } 314 }
310 unit->sysfs_device.parent = &port->sysfs_device; 315 retval = -EINVAL;
311 unit->sysfs_device.release = zfcp_sysfs_unit_release;
312 dev_set_drvdata(&unit->sysfs_device, unit);
313 316
314 /* mark unit unusable as long as sysfs registration is not complete */ 317 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
315 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
316 318
317 spin_lock_init(&unit->latencies.lock); 319 spin_lock_init(&unit->latencies.lock);
318 unit->latencies.write.channel.min = 0xFFFFFFFF; 320 unit->latencies.write.channel.min = 0xFFFFFFFF;
@@ -322,52 +324,31 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
322 unit->latencies.cmd.channel.min = 0xFFFFFFFF; 324 unit->latencies.cmd.channel.min = 0xFFFFFFFF;
323 unit->latencies.cmd.fabric.min = 0xFFFFFFFF; 325 unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
324 326
325 if (device_register(&unit->sysfs_device)) { 327 if (device_register(&unit->dev)) {
326 put_device(&unit->sysfs_device); 328 put_device(&unit->dev);
327 return ERR_PTR(-EINVAL); 329 goto err_out;
328 } 330 }
329 331
330 if (sysfs_create_group(&unit->sysfs_device.kobj, 332 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
331 &zfcp_sysfs_unit_attrs)) { 333 goto err_out_put;
332 device_unregister(&unit->sysfs_device);
333 return ERR_PTR(-EINVAL);
334 }
335 334
336 zfcp_unit_get(unit); 335 write_lock_irq(&port->unit_list_lock);
336 list_add_tail(&unit->list, &port->unit_list);
337 write_unlock_irq(&port->unit_list_lock);
337 338
338 write_lock_irq(&zfcp_data.config_lock);
339 list_add_tail(&unit->list, &port->unit_list_head);
340 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
341 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); 339 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
342 340
343 write_unlock_irq(&zfcp_data.config_lock);
344
345 zfcp_port_get(port);
346
347 return unit; 341 return unit;
348}
349 342
350/** 343err_out_put:
351 * zfcp_unit_dequeue - dequeue unit 344 device_unregister(&unit->dev);
352 * @unit: pointer to zfcp_unit 345err_out:
353 * 346 put_device(&port->dev);
354 * waits until all work is done on unit and removes it then from the unit->list 347 return ERR_PTR(retval);
355 * of the associated port.
356 */
357void zfcp_unit_dequeue(struct zfcp_unit *unit)
358{
359 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
360 write_lock_irq(&zfcp_data.config_lock);
361 list_del(&unit->list);
362 write_unlock_irq(&zfcp_data.config_lock);
363 zfcp_port_put(unit->port);
364 sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
365 device_unregister(&unit->sysfs_device);
366} 348}
367 349
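
The rewritten zfcp_unit_enqueue() illustrates a driver-core rule worth spelling out: before device_register() the structure may still be freed with plain kfree(), but once device_register() has been called - even if it failed - the only valid way to give up the object is put_device(), which lets the release callback do the final kfree(). A condensed sketch under that rule, with illustrative names:

/* Sketch: teardown before vs. after device_register().
 * demo_item/demo_release/demo_add are illustrative names. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct demo_item {
	struct device dev;
};

static void demo_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_item, dev));
}

static struct demo_item *demo_add(struct device *parent, u64 key)
{
	struct demo_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

	if (!item)
		return ERR_PTR(-ENOMEM);

	item->dev.parent = parent;
	item->dev.release = demo_release;
	if (dev_set_name(&item->dev, "0x%016llx", (unsigned long long)key)) {
		kfree(item);	/* not registered yet: plain kfree is valid */
		return ERR_PTR(-ENOMEM);
	}

	if (device_register(&item->dev)) {
		/* after device_register(), only put_device() may free it */
		put_device(&item->dev);
		return ERR_PTR(-EINVAL);
	}
	return item;
}
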
368static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 350static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
369{ 351{
370 /* must only be called with zfcp_data.config_mutex taken */
371 adapter->pool.erp_req = 352 adapter->pool.erp_req =
372 mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); 353 mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
373 if (!adapter->pool.erp_req) 354 if (!adapter->pool.erp_req)
@@ -405,9 +386,9 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
405 if (!adapter->pool.status_read_data) 386 if (!adapter->pool.status_read_data)
406 return -ENOMEM; 387 return -ENOMEM;
407 388
408 adapter->pool.gid_pn_data = 389 adapter->pool.gid_pn =
409 mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); 390 mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
410 if (!adapter->pool.gid_pn_data) 391 if (!adapter->pool.gid_pn)
411 return -ENOMEM; 392 return -ENOMEM;
412 393
413 return 0; 394 return 0;
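
zfcp_allocate_low_mem_buffers() builds its pools with mempool_create_slab_pool(1, ...) so that one element per pool stays reserved and error recovery can make progress even when regular allocations fail. A minimal sketch of that guarantee (cache and pool names are illustrative):

/* Sketch: mempool with one reserved element backed by a slab cache. */
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *demo_cache;
static mempool_t *demo_pool;

static int demo_pool_setup(void)
{
	demo_cache = kmem_cache_create("demo_obj", 128, 0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/* at least one object stays allocatable under memory pressure */
	demo_pool = mempool_create_slab_pool(1, demo_cache);
	if (!demo_pool) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	return 0;
}
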
@@ -415,7 +396,6 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
415 396
416static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) 397static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
417{ 398{
418 /* zfcp_data.config_mutex must be held */
419 if (adapter->pool.erp_req) 399 if (adapter->pool.erp_req)
420 mempool_destroy(adapter->pool.erp_req); 400 mempool_destroy(adapter->pool.erp_req);
421 if (adapter->pool.scsi_req) 401 if (adapter->pool.scsi_req)
@@ -428,8 +408,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
428 mempool_destroy(adapter->pool.status_read_req); 408 mempool_destroy(adapter->pool.status_read_req);
429 if (adapter->pool.status_read_data) 409 if (adapter->pool.status_read_data)
430 mempool_destroy(adapter->pool.status_read_data); 410 mempool_destroy(adapter->pool.status_read_data);
431 if (adapter->pool.gid_pn_data) 411 if (adapter->pool.gid_pn)
432 mempool_destroy(adapter->pool.gid_pn_data); 412 mempool_destroy(adapter->pool.gid_pn);
433} 413}
434 414
435/** 415/**
@@ -497,139 +477,142 @@ static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
497 * zfcp_adapter_enqueue - enqueue a new adapter to the list 477 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 498 * @ccw_device: pointer to the struct ccw_device 478 * @ccw_device: pointer to the struct ccw_device
499 * 479 *
 500 * Returns: 0 if a new adapter was successfully enqueued 480 * Returns: pointer to the new zfcp_adapter, ERR_PTR() on error
501 * -ENOMEM if alloc failed
502 * Enqueues an adapter at the end of the adapter list in the driver data. 481 * Enqueues an adapter at the end of the adapter list in the driver data.
503 * All adapter internal structures are set up. 482 * All adapter internal structures are set up.
 504 * Sysfs entries are also created. 483 * Sysfs entries are also created.
505 * locks: config_mutex must be held to serialize changes to the adapter list
506 */ 484 */
507int zfcp_adapter_enqueue(struct ccw_device *ccw_device) 485struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
508{ 486{
509 struct zfcp_adapter *adapter; 487 struct zfcp_adapter *adapter;
510 488
511 /* 489 if (!get_device(&ccw_device->dev))
512 * Note: It is safe to release the list_lock, as any list changes 490 return ERR_PTR(-ENODEV);
513 * are protected by the config_mutex, which must be held to get here
514 */
515 491
516 adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); 492 adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
517 if (!adapter) 493 if (!adapter) {
518 return -ENOMEM; 494 put_device(&ccw_device->dev);
495 return ERR_PTR(-ENOMEM);
496 }
497
498 kref_init(&adapter->ref);
519 499
520 ccw_device->handler = NULL; 500 ccw_device->handler = NULL;
521 adapter->ccw_device = ccw_device; 501 adapter->ccw_device = ccw_device;
522 atomic_set(&adapter->refcount, 0); 502
503 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
504 INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
523 505
524 if (zfcp_qdio_setup(adapter)) 506 if (zfcp_qdio_setup(adapter))
525 goto qdio_failed; 507 goto failed;
526 508
527 if (zfcp_allocate_low_mem_buffers(adapter)) 509 if (zfcp_allocate_low_mem_buffers(adapter))
528 goto low_mem_buffers_failed; 510 goto failed;
529 511
530 if (zfcp_reqlist_alloc(adapter)) 512 adapter->req_list = zfcp_reqlist_alloc();
531 goto low_mem_buffers_failed; 513 if (!adapter->req_list)
514 goto failed;
532 515
533 if (zfcp_dbf_adapter_register(adapter)) 516 if (zfcp_dbf_adapter_register(adapter))
534 goto debug_register_failed; 517 goto failed;
535 518
536 if (zfcp_setup_adapter_work_queue(adapter)) 519 if (zfcp_setup_adapter_work_queue(adapter))
537 goto work_queue_failed; 520 goto failed;
538 521
539 if (zfcp_fc_gs_setup(adapter)) 522 if (zfcp_fc_gs_setup(adapter))
540 goto generic_services_failed; 523 goto failed;
524
525 rwlock_init(&adapter->port_list_lock);
526 INIT_LIST_HEAD(&adapter->port_list);
541 527
542 init_waitqueue_head(&adapter->remove_wq);
543 init_waitqueue_head(&adapter->erp_ready_wq); 528 init_waitqueue_head(&adapter->erp_ready_wq);
544 init_waitqueue_head(&adapter->erp_done_wqh); 529 init_waitqueue_head(&adapter->erp_done_wqh);
545 530
546 INIT_LIST_HEAD(&adapter->port_list_head);
547 INIT_LIST_HEAD(&adapter->erp_ready_head); 531 INIT_LIST_HEAD(&adapter->erp_ready_head);
548 INIT_LIST_HEAD(&adapter->erp_running_head); 532 INIT_LIST_HEAD(&adapter->erp_running_head);
549 533
550 spin_lock_init(&adapter->req_list_lock);
551
552 rwlock_init(&adapter->erp_lock); 534 rwlock_init(&adapter->erp_lock);
553 rwlock_init(&adapter->abort_lock); 535 rwlock_init(&adapter->abort_lock);
554 536
555 if (zfcp_erp_thread_setup(adapter)) 537 if (zfcp_erp_thread_setup(adapter))
556 goto erp_thread_failed; 538 goto failed;
557
558 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
559 INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later);
560 539
561 adapter->service_level.seq_print = zfcp_print_sl; 540 adapter->service_level.seq_print = zfcp_print_sl;
562 541
563 /* mark adapter unusable as long as sysfs registration is not complete */
564 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
565
566 dev_set_drvdata(&ccw_device->dev, adapter); 542 dev_set_drvdata(&ccw_device->dev, adapter);
567 543
568 if (sysfs_create_group(&ccw_device->dev.kobj, 544 if (sysfs_create_group(&ccw_device->dev.kobj,
569 &zfcp_sysfs_adapter_attrs)) 545 &zfcp_sysfs_adapter_attrs))
570 goto sysfs_failed; 546 goto failed;
571
572 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
573 547
574 if (!zfcp_adapter_scsi_register(adapter)) 548 if (!zfcp_adapter_scsi_register(adapter))
575 return 0; 549 return adapter;
576 550
577sysfs_failed: 551failed:
578 zfcp_erp_thread_kill(adapter); 552 zfcp_adapter_unregister(adapter);
579erp_thread_failed: 553 return ERR_PTR(-ENOMEM);
580 zfcp_fc_gs_destroy(adapter); 554}
581generic_services_failed: 555
556void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
557{
558 struct ccw_device *cdev = adapter->ccw_device;
559
560 cancel_work_sync(&adapter->scan_work);
561 cancel_work_sync(&adapter->stat_work);
582 zfcp_destroy_adapter_work_queue(adapter); 562 zfcp_destroy_adapter_work_queue(adapter);
583work_queue_failed: 563
564 zfcp_fc_wka_ports_force_offline(adapter->gs);
565 zfcp_adapter_scsi_unregister(adapter);
566 sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
567
568 zfcp_erp_thread_kill(adapter);
584 zfcp_dbf_adapter_unregister(adapter->dbf); 569 zfcp_dbf_adapter_unregister(adapter->dbf);
585debug_register_failed:
586 dev_set_drvdata(&ccw_device->dev, NULL);
587 kfree(adapter->req_list);
588low_mem_buffers_failed:
589 zfcp_free_low_mem_buffers(adapter);
590qdio_failed:
591 zfcp_qdio_destroy(adapter->qdio); 570 zfcp_qdio_destroy(adapter->qdio);
592 kfree(adapter); 571
593 return -ENOMEM; 572 zfcp_ccw_adapter_put(adapter); /* final put to release */
594} 573}
595 574
596/** 575/**
597 * zfcp_adapter_dequeue - remove the adapter from the resource list 576 * zfcp_adapter_release - remove the adapter from the resource list
598 * @adapter: pointer to struct zfcp_adapter which should be removed 577 * @ref: pointer to struct kref
 599 * locks: adapter list write lock is assumed to be held by caller 578 * locks: called with zfcp_ccw_adapter_ref_lock held by zfcp_ccw_adapter_put()
600 */ 579 */
601void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) 580void zfcp_adapter_release(struct kref *ref)
602{ 581{
603 int retval = 0; 582 struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
604 unsigned long flags; 583 ref);
584 struct ccw_device *cdev = adapter->ccw_device;
605 585
606 cancel_work_sync(&adapter->stat_work);
607 zfcp_fc_wka_ports_force_offline(adapter->gs);
608 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
609 &zfcp_sysfs_adapter_attrs);
610 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 586 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
611 /* sanity check: no pending FSF requests */
612 spin_lock_irqsave(&adapter->req_list_lock, flags);
613 retval = zfcp_reqlist_isempty(adapter);
614 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
615 if (!retval)
616 return;
617
618 zfcp_fc_gs_destroy(adapter); 587 zfcp_fc_gs_destroy(adapter);
619 zfcp_erp_thread_kill(adapter);
620 zfcp_destroy_adapter_work_queue(adapter);
621 zfcp_dbf_adapter_unregister(adapter->dbf);
622 zfcp_free_low_mem_buffers(adapter); 588 zfcp_free_low_mem_buffers(adapter);
623 zfcp_qdio_destroy(adapter->qdio);
624 kfree(adapter->req_list); 589 kfree(adapter->req_list);
625 kfree(adapter->fc_stats); 590 kfree(adapter->fc_stats);
626 kfree(adapter->stats_reset_data); 591 kfree(adapter->stats_reset_data);
627 kfree(adapter); 592 kfree(adapter);
593 put_device(&cdev->dev);
628} 594}
629 595
630static void zfcp_sysfs_port_release(struct device *dev) 596/**
 597 * zfcp_device_unregister - remove a port or unit from the system
 598 * @dev: reference to the device to be removed
 599 * @grp: attribute group attached to the device
 600 *
 601 * Helper function to unregister a port or unit from the system
602 */
603void zfcp_device_unregister(struct device *dev,
604 const struct attribute_group *grp)
631{ 605{
632 kfree(container_of(dev, struct zfcp_port, sysfs_device)); 606 sysfs_remove_group(&dev->kobj, grp);
607 device_unregister(dev);
608}
609
610static void zfcp_port_release(struct device *dev)
611{
612 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
613
614 zfcp_ccw_adapter_put(port->adapter);
615 kfree(port);
633} 616}
634 617
635/** 618/**
@@ -639,7 +622,6 @@ static void zfcp_sysfs_port_release(struct device *dev)
639 * @status: initial status for the port 622 * @status: initial status for the port
640 * @d_id: destination id of the remote port to be enqueued 623 * @d_id: destination id of the remote port to be enqueued
641 * Returns: pointer to enqueued port on success, ERR_PTR on error 624 * Returns: pointer to enqueued port on success, ERR_PTR on error
642 * Locks: config_mutex must be held to serialize changes to the port list
643 * 625 *
644 * All port internal structures are set up and the sysfs entry is generated. 626 * All port internal structures are set up and the sysfs entry is generated.
645 * d_id is used to enqueue ports with a well known address like the Directory 627 * d_id is used to enqueue ports with a well known address like the Directory
@@ -649,20 +631,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
649 u32 status, u32 d_id) 631 u32 status, u32 d_id)
650{ 632{
651 struct zfcp_port *port; 633 struct zfcp_port *port;
634 int retval = -ENOMEM;
635
636 kref_get(&adapter->ref);
652 637
653 read_lock_irq(&zfcp_data.config_lock); 638 port = zfcp_get_port_by_wwpn(adapter, wwpn);
654 if (zfcp_get_port_by_wwpn(adapter, wwpn)) { 639 if (port) {
655 read_unlock_irq(&zfcp_data.config_lock); 640 put_device(&port->dev);
656 return ERR_PTR(-EINVAL); 641 retval = -EEXIST;
642 goto err_out;
657 } 643 }
658 read_unlock_irq(&zfcp_data.config_lock);
659 644
660 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); 645 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
661 if (!port) 646 if (!port)
662 return ERR_PTR(-ENOMEM); 647 goto err_out;
648
649 rwlock_init(&port->unit_list_lock);
650 INIT_LIST_HEAD(&port->unit_list);
663 651
664 init_waitqueue_head(&port->remove_wq);
665 INIT_LIST_HEAD(&port->unit_list_head);
666 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); 652 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
667 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); 653 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
668 INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); 654 INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
@@ -671,58 +657,37 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
671 port->d_id = d_id; 657 port->d_id = d_id;
672 port->wwpn = wwpn; 658 port->wwpn = wwpn;
673 port->rport_task = RPORT_NONE; 659 port->rport_task = RPORT_NONE;
660 port->dev.parent = &adapter->ccw_device->dev;
661 port->dev.release = zfcp_port_release;
674 662
675 /* mark port unusable as long as sysfs registration is not complete */ 663 if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
676 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
677 atomic_set(&port->refcount, 0);
678
679 if (dev_set_name(&port->sysfs_device, "0x%016llx",
680 (unsigned long long)wwpn)) {
681 kfree(port); 664 kfree(port);
682 return ERR_PTR(-ENOMEM); 665 goto err_out;
683 }
684 port->sysfs_device.parent = &adapter->ccw_device->dev;
685 port->sysfs_device.release = zfcp_sysfs_port_release;
686 dev_set_drvdata(&port->sysfs_device, port);
687
688 if (device_register(&port->sysfs_device)) {
689 put_device(&port->sysfs_device);
690 return ERR_PTR(-EINVAL);
691 } 666 }
667 retval = -EINVAL;
692 668
693 if (sysfs_create_group(&port->sysfs_device.kobj, 669 if (device_register(&port->dev)) {
694 &zfcp_sysfs_port_attrs)) { 670 put_device(&port->dev);
695 device_unregister(&port->sysfs_device); 671 goto err_out;
696 return ERR_PTR(-EINVAL);
697 } 672 }
698 673
699 zfcp_port_get(port); 674 if (sysfs_create_group(&port->dev.kobj,
675 &zfcp_sysfs_port_attrs))
676 goto err_out_put;
700 677
701 write_lock_irq(&zfcp_data.config_lock); 678 write_lock_irq(&adapter->port_list_lock);
702 list_add_tail(&port->list, &adapter->port_list_head); 679 list_add_tail(&port->list, &adapter->port_list);
703 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 680 write_unlock_irq(&adapter->port_list_lock);
704 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
705 681
706 write_unlock_irq(&zfcp_data.config_lock); 682 atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
707 683
708 zfcp_adapter_get(adapter);
709 return port; 684 return port;
710}
711 685
712/** 686err_out_put:
713 * zfcp_port_dequeue - dequeues a port from the port list of the adapter 687 device_unregister(&port->dev);
714 * @port: pointer to struct zfcp_port which should be removed 688err_out:
715 */ 689 zfcp_ccw_adapter_put(adapter);
716void zfcp_port_dequeue(struct zfcp_port *port) 690 return ERR_PTR(retval);
717{
718 write_lock_irq(&zfcp_data.config_lock);
719 list_del(&port->list);
720 write_unlock_irq(&zfcp_data.config_lock);
721 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
722 cancel_work_sync(&port->rport_work); /* usually not necessary */
723 zfcp_adapter_put(port->adapter);
724 sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
725 device_unregister(&port->sysfs_device);
726} 691}
727 692
728/** 693/**
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index e08339428ecf..ce1cc7a11fb4 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,38 +3,45 @@
3 * 3 *
4 * Registration and callback for the s390 common I/O layer. 4 * Registration and callback for the s390 common I/O layer.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13#include "zfcp_reqlist.h"
13 14
14#define ZFCP_MODEL_PRIV 0x4 15#define ZFCP_MODEL_PRIV 0x4
15 16
16static int zfcp_ccw_suspend(struct ccw_device *cdev) 17static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
17 18
19struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
18{ 20{
19 struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); 21 struct zfcp_adapter *adapter;
20 22 unsigned long flags;
21 if (!adapter)
22 return 0;
23
24 mutex_lock(&zfcp_data.config_mutex);
25 23
26 zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); 24 spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
27 zfcp_erp_wait(adapter); 25 adapter = dev_get_drvdata(&cdev->dev);
26 if (adapter)
27 kref_get(&adapter->ref);
28 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
29 return adapter;
30}
28 31
29 mutex_unlock(&zfcp_data.config_mutex); 32void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
33{
34 unsigned long flags;
30 35
31 return 0; 36 spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
37 kref_put(&adapter->ref, zfcp_adapter_release);
38 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
32} 39}
33 40
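
These two helpers are the heart of the new adapter life cycle: the drvdata pointer is read and kref_get() called only under zfcp_ccw_adapter_ref_lock, and the final kref_put() runs under the same lock, so a concurrent zfcp_ccw_adapter_by_cdev() can never pick up an adapter whose reference count has already reached zero. The pattern in reduced form, with illustrative names:

/* Sketch: kref lookup/release serialized by one spinlock. */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct kref ref;
};

static DEFINE_SPINLOCK(demo_ref_lock);
static struct demo_obj *demo_current;	/* models dev_get_drvdata() */

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

struct demo_obj *demo_get(void)
{
	struct demo_obj *obj;
	unsigned long flags;

	spin_lock_irqsave(&demo_ref_lock, flags);
	obj = demo_current;
	if (obj)
		kref_get(&obj->ref);	/* safe: release holds the same lock */
	spin_unlock_irqrestore(&demo_ref_lock, flags);
	return obj;
}

void demo_put(struct demo_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_ref_lock, flags);
	kref_put(&obj->ref, demo_release);
	spin_unlock_irqrestore(&demo_ref_lock, flags);
}
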
34static int zfcp_ccw_activate(struct ccw_device *cdev) 41static int zfcp_ccw_activate(struct ccw_device *cdev)
35 42
36{ 43{
37 struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); 44 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
38 45
39 if (!adapter) 46 if (!adapter)
40 return 0; 47 return 0;
@@ -46,6 +53,8 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
46 zfcp_erp_wait(adapter); 53 zfcp_erp_wait(adapter);
47 flush_work(&adapter->scan_work); 54 flush_work(&adapter->scan_work);
48 55
56 zfcp_ccw_adapter_put(adapter);
57
49 return 0; 58 return 0;
50} 59}
51 60
@@ -67,28 +76,28 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
67 76
68/** 77/**
69 * zfcp_ccw_probe - probe function of zfcp driver 78 * zfcp_ccw_probe - probe function of zfcp driver
70 * @ccw_device: pointer to belonging ccw device 79 * @cdev: pointer to belonging ccw device
71 * 80 *
72 * This function gets called by the common i/o layer for each FCP 81 * This function gets called by the common i/o layer for each FCP
73 * device found on the current system. This is only a stub to make cio 82 * device found on the current system. This is only a stub to make cio
74 * work: To only allocate adapter resources for devices actually used, 83 * work: To only allocate adapter resources for devices actually used,
75 * the allocation is deferred to the first call to ccw_set_online. 84 * the allocation is deferred to the first call to ccw_set_online.
76 */ 85 */
77static int zfcp_ccw_probe(struct ccw_device *ccw_device) 86static int zfcp_ccw_probe(struct ccw_device *cdev)
78{ 87{
79 return 0; 88 return 0;
80} 89}
81 90
82/** 91/**
83 * zfcp_ccw_remove - remove function of zfcp driver 92 * zfcp_ccw_remove - remove function of zfcp driver
84 * @ccw_device: pointer to belonging ccw device 93 * @cdev: pointer to belonging ccw device
85 * 94 *
86 * This function gets called by the common i/o layer and removes an adapter 95 * This function gets called by the common i/o layer and removes an adapter
87 * from the system. Task of this function is to get rid of all units and 96 * from the system. Task of this function is to get rid of all units and
88 * ports that belong to this adapter. And in addition all resources of this 97 * ports that belong to this adapter. And in addition all resources of this
89 * adapter will be freed too. 98 * adapter will be freed too.
90 */ 99 */
91static void zfcp_ccw_remove(struct ccw_device *ccw_device) 100static void zfcp_ccw_remove(struct ccw_device *cdev)
92{ 101{
93 struct zfcp_adapter *adapter; 102 struct zfcp_adapter *adapter;
94 struct zfcp_port *port, *p; 103 struct zfcp_port *port, *p;
@@ -96,49 +105,35 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
96 LIST_HEAD(unit_remove_lh); 105 LIST_HEAD(unit_remove_lh);
97 LIST_HEAD(port_remove_lh); 106 LIST_HEAD(port_remove_lh);
98 107
99 ccw_device_set_offline(ccw_device); 108 ccw_device_set_offline(cdev);
100 109
101 mutex_lock(&zfcp_data.config_mutex); 110 adapter = zfcp_ccw_adapter_by_cdev(cdev);
102 adapter = dev_get_drvdata(&ccw_device->dev);
103 if (!adapter) 111 if (!adapter)
104 goto out; 112 return;
105 mutex_unlock(&zfcp_data.config_mutex);
106 113
107 cancel_work_sync(&adapter->scan_work); 114 write_lock_irq(&adapter->port_list_lock);
108 115 list_for_each_entry_safe(port, p, &adapter->port_list, list) {
109 mutex_lock(&zfcp_data.config_mutex); 116 write_lock(&port->unit_list_lock);
110 117 list_for_each_entry_safe(unit, u, &port->unit_list, list)
111 /* this also removes the scsi devices, so call it first */
112 zfcp_adapter_scsi_unregister(adapter);
113
114 write_lock_irq(&zfcp_data.config_lock);
115 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
116 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
117 list_move(&unit->list, &unit_remove_lh); 118 list_move(&unit->list, &unit_remove_lh);
118 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, 119 write_unlock(&port->unit_list_lock);
119 &unit->status);
120 }
121 list_move(&port->list, &port_remove_lh); 120 list_move(&port->list, &port_remove_lh);
122 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
123 } 121 }
124 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 122 write_unlock_irq(&adapter->port_list_lock);
125 write_unlock_irq(&zfcp_data.config_lock); 123 zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
126 124
127 list_for_each_entry_safe(port, p, &port_remove_lh, list) { 125 list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
128 list_for_each_entry_safe(unit, u, &unit_remove_lh, list) 126 zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
129 zfcp_unit_dequeue(unit);
130 zfcp_port_dequeue(port);
131 }
132 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
133 zfcp_adapter_dequeue(adapter);
134 127
135out: 128 list_for_each_entry_safe(port, p, &port_remove_lh, list)
136 mutex_unlock(&zfcp_data.config_mutex); 129 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
130
131 zfcp_adapter_unregister(adapter);
137} 132}
138 133
139/** 134/**
140 * zfcp_ccw_set_online - set_online function of zfcp driver 135 * zfcp_ccw_set_online - set_online function of zfcp driver
141 * @ccw_device: pointer to belonging ccw device 136 * @cdev: pointer to belonging ccw device
142 * 137 *
143 * This function gets called by the common i/o layer and sets an 138 * This function gets called by the common i/o layer and sets an
144 * adapter into state online. The first call will allocate all 139 * adapter into state online. The first call will allocate all
@@ -149,27 +144,24 @@ out:
149 * the SCSI stack, that the QDIO queues will be set up and that the 144 * the SCSI stack, that the QDIO queues will be set up and that the
150 * adapter will be opened. 145 * adapter will be opened.
151 */ 146 */
152static int zfcp_ccw_set_online(struct ccw_device *ccw_device) 147static int zfcp_ccw_set_online(struct ccw_device *cdev)
153{ 148{
154 struct zfcp_adapter *adapter; 149 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
155 int ret = 0;
156
157 mutex_lock(&zfcp_data.config_mutex);
158 adapter = dev_get_drvdata(&ccw_device->dev);
159 150
160 if (!adapter) { 151 if (!adapter) {
161 ret = zfcp_adapter_enqueue(ccw_device); 152 adapter = zfcp_adapter_enqueue(cdev);
162 if (ret) { 153
163 dev_err(&ccw_device->dev, 154 if (IS_ERR(adapter)) {
155 dev_err(&cdev->dev,
164 "Setting up data structures for the " 156 "Setting up data structures for the "
165 "FCP adapter failed\n"); 157 "FCP adapter failed\n");
166 goto out; 158 return PTR_ERR(adapter);
167 } 159 }
168 adapter = dev_get_drvdata(&ccw_device->dev); 160 kref_get(&adapter->ref);
169 } 161 }
170 162
171 /* initialize request counter */ 163 /* initialize request counter */
172 BUG_ON(!zfcp_reqlist_isempty(adapter)); 164 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
173 adapter->req_no = 0; 165 adapter->req_no = 0;
174 166
175 zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, 167 zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
@@ -177,58 +169,61 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
177 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 169 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
178 "ccsonl2", NULL); 170 "ccsonl2", NULL);
179 zfcp_erp_wait(adapter); 171 zfcp_erp_wait(adapter);
180out: 172
181 mutex_unlock(&zfcp_data.config_mutex); 173 flush_work(&adapter->scan_work);
182 if (!ret) 174
183 flush_work(&adapter->scan_work); 175 zfcp_ccw_adapter_put(adapter);
184 return ret; 176 return 0;
185} 177}
186 178
187/** 179/**
188 * zfcp_ccw_set_offline - set_offline function of zfcp driver 180 * zfcp_ccw_set_offline - set_offline function of zfcp driver
189 * @ccw_device: pointer to belonging ccw device 181 * @cdev: pointer to belonging ccw device
190 * 182 *
191 * This function gets called by the common i/o layer and sets an adapter 183 * This function gets called by the common i/o layer and sets an adapter
192 * into state offline. 184 * into state offline.
193 */ 185 */
194static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) 186static int zfcp_ccw_set_offline(struct ccw_device *cdev)
195{ 187{
196 struct zfcp_adapter *adapter; 188 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
189
190 if (!adapter)
191 return 0;
197 192
198 mutex_lock(&zfcp_data.config_mutex);
199 adapter = dev_get_drvdata(&ccw_device->dev);
200 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); 193 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
201 zfcp_erp_wait(adapter); 194 zfcp_erp_wait(adapter);
202 mutex_unlock(&zfcp_data.config_mutex); 195
196 zfcp_ccw_adapter_put(adapter);
203 return 0; 197 return 0;
204} 198}
205 199
206/** 200/**
207 * zfcp_ccw_notify - ccw notify function 201 * zfcp_ccw_notify - ccw notify function
208 * @ccw_device: pointer to belonging ccw device 202 * @cdev: pointer to belonging ccw device
209 * @event: indicates if adapter was detached or attached 203 * @event: indicates if adapter was detached or attached
210 * 204 *
211 * This function gets called by the common i/o layer if an adapter has gone 205 * This function gets called by the common i/o layer if an adapter has gone
212 * or reappeared. 206 * or reappeared.
213 */ 207 */
214static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) 208static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
215{ 209{
216 struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev); 210 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
211
212 if (!adapter)
213 return 1;
217 214
218 switch (event) { 215 switch (event) {
219 case CIO_GONE: 216 case CIO_GONE:
220 dev_warn(&adapter->ccw_device->dev, 217 dev_warn(&cdev->dev, "The FCP device has been detached\n");
221 "The FCP device has been detached\n");
222 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL); 218 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
223 break; 219 break;
224 case CIO_NO_PATH: 220 case CIO_NO_PATH:
225 dev_warn(&adapter->ccw_device->dev, 221 dev_warn(&cdev->dev,
226 "The CHPID for the FCP device is offline\n"); 222 "The CHPID for the FCP device is offline\n");
227 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL); 223 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
228 break; 224 break;
229 case CIO_OPER: 225 case CIO_OPER:
230 dev_info(&adapter->ccw_device->dev, 226 dev_info(&cdev->dev, "The FCP device is operational again\n");
231 "The FCP device is operational again\n");
232 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, 227 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
233 ZFCP_STATUS_COMMON_RUNNING, 228 ZFCP_STATUS_COMMON_RUNNING,
234 ZFCP_SET); 229 ZFCP_SET);
@@ -236,11 +231,13 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
236 "ccnoti4", NULL); 231 "ccnoti4", NULL);
237 break; 232 break;
238 case CIO_BOXED: 233 case CIO_BOXED:
239 dev_warn(&adapter->ccw_device->dev, "The FCP device " 234 dev_warn(&cdev->dev, "The FCP device did not respond within "
240 "did not respond within the specified time\n"); 235 "the specified time\n");
241 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); 236 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
242 break; 237 break;
243 } 238 }
239
240 zfcp_ccw_adapter_put(adapter);
244 return 1; 241 return 1;
245} 242}
246 243
@@ -250,18 +247,16 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
250 */ 247 */
251static void zfcp_ccw_shutdown(struct ccw_device *cdev) 248static void zfcp_ccw_shutdown(struct ccw_device *cdev)
252{ 249{
253 struct zfcp_adapter *adapter; 250 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
254 251
255 mutex_lock(&zfcp_data.config_mutex);
256 adapter = dev_get_drvdata(&cdev->dev);
257 if (!adapter) 252 if (!adapter)
258 goto out; 253 return;
259 254
260 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); 255 zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
261 zfcp_erp_wait(adapter); 256 zfcp_erp_wait(adapter);
262 zfcp_erp_thread_kill(adapter); 257 zfcp_erp_thread_kill(adapter);
263out: 258
264 mutex_unlock(&zfcp_data.config_mutex); 259 zfcp_ccw_adapter_put(adapter);
265} 260}
266 261
267struct ccw_driver zfcp_ccw_driver = { 262struct ccw_driver zfcp_ccw_driver = {
@@ -274,18 +269,7 @@ struct ccw_driver zfcp_ccw_driver = {
274 .set_offline = zfcp_ccw_set_offline, 269 .set_offline = zfcp_ccw_set_offline,
275 .notify = zfcp_ccw_notify, 270 .notify = zfcp_ccw_notify,
276 .shutdown = zfcp_ccw_shutdown, 271 .shutdown = zfcp_ccw_shutdown,
277 .freeze = zfcp_ccw_suspend, 272 .freeze = zfcp_ccw_set_offline,
278 .thaw = zfcp_ccw_activate, 273 .thaw = zfcp_ccw_activate,
279 .restore = zfcp_ccw_activate, 274 .restore = zfcp_ccw_activate,
280}; 275};
281
282/**
283 * zfcp_ccw_register - ccw register function
284 *
285 * Registers the driver at the common i/o layer. This function will be called
286 * at module load time/system start.
287 */
288int __init zfcp_ccw_register(void)
289{
290 return ccw_driver_register(&zfcp_ccw_driver);
291}
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index ef681dfed0cc..25d9e0ae9c57 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -10,8 +10,10 @@
10#define KMSG_COMPONENT "zfcp" 10#define KMSG_COMPONENT "zfcp"
11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 12
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/miscdevice.h> 15#include <linux/miscdevice.h>
16#include <asm/compat.h>
15#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
16#include "zfcp_def.h" 18#include "zfcp_def.h"
17#include "zfcp_ext.h" 19#include "zfcp_ext.h"
@@ -86,22 +88,17 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
86static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) 88static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
87{ 89{
88 char busid[9]; 90 char busid[9];
89 struct ccw_device *ccwdev; 91 struct ccw_device *cdev;
90 struct zfcp_adapter *adapter = NULL; 92 struct zfcp_adapter *adapter;
91 93
92 snprintf(busid, sizeof(busid), "0.0.%04x", devno); 94 snprintf(busid, sizeof(busid), "0.0.%04x", devno);
93 ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); 95 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
94 if (!ccwdev) 96 if (!cdev)
95 goto out; 97 return NULL;
96 98
97 adapter = dev_get_drvdata(&ccwdev->dev); 99 adapter = zfcp_ccw_adapter_by_cdev(cdev);
98 if (!adapter) 100
99 goto out_put; 101 put_device(&cdev->dev);
100
101 zfcp_adapter_get(adapter);
102out_put:
103 put_device(&ccwdev->dev);
104out:
105 return adapter; 102 return adapter;
106} 103}
107 104
@@ -168,7 +165,7 @@ static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
168} 165}
169 166
170static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, 167static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
171 unsigned long buffer) 168 unsigned long arg)
172{ 169{
173 struct zfcp_cfdc_data *data; 170 struct zfcp_cfdc_data *data;
174 struct zfcp_cfdc_data __user *data_user; 171 struct zfcp_cfdc_data __user *data_user;
@@ -180,7 +177,11 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
180 if (command != ZFCP_CFDC_IOC) 177 if (command != ZFCP_CFDC_IOC)
181 return -ENOTTY; 178 return -ENOTTY;
182 179
183 data_user = (void __user *) buffer; 180 if (is_compat_task())
181 data_user = compat_ptr(arg);
182 else
183 data_user = (void __user *)arg;
184
184 if (!data_user) 185 if (!data_user)
185 return -EINVAL; 186 return -EINVAL;
186 187
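
The is_compat_task()/compat_ptr() change makes the ioctl usable from 31-bit user space: a pointer argument handed in by a compat task must go through compat_ptr() instead of a plain cast. The essential shape of such a handler, reduced to a sketch (command handling and the actual copy are illustrative):

/* Sketch: compat-aware derivation of the user pointer in an ioctl. */
#include <linux/compat.h>
#include <linux/fs.h>

static long demo_ioctl(struct file *file, unsigned int command,
		       unsigned long arg)
{
	void __user *argp;

	if (is_compat_task())
		argp = compat_ptr(arg);		/* 31-bit user pointer */
	else
		argp = (void __user *)arg;

	if (!argp)
		return -EINVAL;
	/* ... copy_from_user()/copy_to_user() against argp ... */
	return 0;
}
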
@@ -212,7 +213,6 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
212 retval = -ENXIO; 213 retval = -ENXIO;
213 goto free_buffer; 214 goto free_buffer;
214 } 215 }
215 zfcp_adapter_get(adapter);
216 216
217 retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg, 217 retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
218 data_user->control_file); 218 data_user->control_file);
@@ -245,7 +245,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
245 free_sg: 245 free_sg:
246 zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES); 246 zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
247 adapter_put: 247 adapter_put:
248 zfcp_adapter_put(adapter); 248 zfcp_ccw_adapter_put(adapter);
249 free_buffer: 249 free_buffer:
250 kfree(data); 250 kfree(data);
251 no_mem_sense: 251 no_mem_sense:
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 215b70749e95..075852f6968c 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -10,9 +10,11 @@
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/ctype.h> 12#include <linux/ctype.h>
13#include <linux/slab.h>
13#include <asm/debug.h> 14#include <asm/debug.h>
14#include "zfcp_dbf.h" 15#include "zfcp_dbf.h"
15#include "zfcp_ext.h" 16#include "zfcp_ext.h"
17#include "zfcp_fc.h"
16 18
17static u32 dbfsize = 4; 19static u32 dbfsize = 4;
18 20
@@ -139,9 +141,9 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
139 memcpy(response->fsf_status_qual, 141 memcpy(response->fsf_status_qual,
140 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); 142 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
141 response->fsf_req_status = fsf_req->status; 143 response->fsf_req_status = fsf_req->status;
142 response->sbal_first = fsf_req->queue_req.sbal_first; 144 response->sbal_first = fsf_req->qdio_req.sbal_first;
143 response->sbal_last = fsf_req->queue_req.sbal_last; 145 response->sbal_last = fsf_req->qdio_req.sbal_last;
144 response->sbal_response = fsf_req->queue_req.sbal_response; 146 response->sbal_response = fsf_req->qdio_req.sbal_response;
145 response->pool = fsf_req->pool != NULL; 147 response->pool = fsf_req->pool != NULL;
146 response->erp_action = (unsigned long)fsf_req->erp_action; 148 response->erp_action = (unsigned long)fsf_req->erp_action;
147 149
@@ -177,8 +179,7 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
177 179
178 case FSF_QTCB_SEND_ELS: 180 case FSF_QTCB_SEND_ELS:
179 send_els = (struct zfcp_send_els *)fsf_req->data; 181 send_els = (struct zfcp_send_els *)fsf_req->data;
180 response->u.els.d_id = qtcb->bottom.support.d_id; 182 response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id);
181 response->u.els.ls_code = send_els->ls_code >> 24;
182 break; 183 break;
183 184
184 case FSF_QTCB_ABORT_FCP_CMND: 185 case FSF_QTCB_ABORT_FCP_CMND:
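
The D_ID handling changes because FC destination IDs are 24-bit values stored as a 3-byte big-endian array in the QTCB, while the trace record wants a host-order u32; ntoh24() performs that conversion. A sketch of what such a helper computes:

/* Sketch: converting a 3-byte big-endian FC D_ID to a host-order u32,
 * equivalent in effect to ntoh24(). */
static inline u32 demo_ntoh24(const u8 *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}
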
@@ -327,7 +328,7 @@ static void zfcp_dbf_hba_view_response(char **p,
327 break; 328 break;
328 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 329 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
329 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); 330 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
330 p += sprintf(*p, "\n"); 331 *p += sprintf(*p, "\n");
331 break; 332 break;
332 333
333 case FSF_QTCB_OPEN_PORT_WITH_DID: 334 case FSF_QTCB_OPEN_PORT_WITH_DID:
@@ -348,7 +349,6 @@ static void zfcp_dbf_hba_view_response(char **p,
348 349
349 case FSF_QTCB_SEND_ELS: 350 case FSF_QTCB_SEND_ELS:
350 zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id); 351 zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
351 zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code);
352 break; 352 break;
353 353
354 case FSF_QTCB_ABORT_FCP_CMND: 354 case FSF_QTCB_ABORT_FCP_CMND:
@@ -577,7 +577,8 @@ void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
577 struct zfcp_adapter *adapter = dbf->adapter; 577 struct zfcp_adapter *adapter = dbf->adapter;
578 578
579 zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, 579 zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
580 &adapter->erp_counter, 0, 0, 0); 580 &adapter->erp_counter, 0, 0,
581 ZFCP_DBF_INVALID_LUN);
581} 582}
582 583
583/** 584/**
@@ -591,8 +592,8 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
591 struct zfcp_dbf *dbf = port->adapter->dbf; 592 struct zfcp_dbf *dbf = port->adapter->dbf;
592 593
593 zfcp_dbf_rec_target(id, ref, dbf, &port->status, 594 zfcp_dbf_rec_target(id, ref, dbf, &port->status,
594 &port->erp_counter, port->wwpn, port->d_id, 595 &port->erp_counter, port->wwpn, port->d_id,
595 0); 596 ZFCP_DBF_INVALID_LUN);
596} 597}
597 598
598/** 599/**
@@ -643,10 +644,9 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
643 r->u.trigger.ps = atomic_read(&port->status); 644 r->u.trigger.ps = atomic_read(&port->status);
644 r->u.trigger.wwpn = port->wwpn; 645 r->u.trigger.wwpn = port->wwpn;
645 } 646 }
646 if (unit) { 647 if (unit)
647 r->u.trigger.us = atomic_read(&unit->status); 648 r->u.trigger.us = atomic_read(&unit->status);
648 r->u.trigger.fcp_lun = unit->fcp_lun; 649 r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
649 }
650 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); 650 debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
651 spin_unlock_irqrestore(&dbf->rec_lock, flags); 651 spin_unlock_irqrestore(&dbf->rec_lock, flags);
652} 652}
@@ -669,7 +669,7 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
669 r->u.action.action = (unsigned long)erp_action; 669 r->u.action.action = (unsigned long)erp_action;
670 r->u.action.status = erp_action->status; 670 r->u.action.status = erp_action->status;
671 r->u.action.step = erp_action->step; 671 r->u.action.step = erp_action->step;
672 r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; 672 r->u.action.fsf_req = erp_action->fsf_req_id;
673 debug_event(dbf->rec, 5, r, sizeof(*r)); 673 debug_event(dbf->rec, 5, r, sizeof(*r));
674 spin_unlock_irqrestore(&dbf->rec_lock, flags); 674 spin_unlock_irqrestore(&dbf->rec_lock, flags);
675} 675}
@@ -677,14 +677,14 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
677/** 677/**
678 * zfcp_dbf_san_ct_request - trace event for issued CT request 678 * zfcp_dbf_san_ct_request - trace event for issued CT request
679 * @fsf_req: request containing issued CT data 679 * @fsf_req: request containing issued CT data
680 * @d_id: destination id where ct request is sent to
680 */ 681 */
681void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) 682void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id)
682{ 683{
683 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 684 struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
684 struct zfcp_wka_port *wka_port = ct->wka_port; 685 struct zfcp_adapter *adapter = fsf_req->adapter;
685 struct zfcp_adapter *adapter = wka_port->adapter;
686 struct zfcp_dbf *dbf = adapter->dbf; 686 struct zfcp_dbf *dbf = adapter->dbf;
687 struct ct_hdr *hdr = sg_virt(ct->req); 687 struct fc_ct_hdr *hdr = sg_virt(ct->req);
688 struct zfcp_dbf_san_record *r = &dbf->san_buf; 688 struct zfcp_dbf_san_record *r = &dbf->san_buf;
689 struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req; 689 struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
690 int level = 3; 690 int level = 3;
@@ -695,19 +695,18 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
695 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); 695 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
696 r->fsf_reqid = fsf_req->req_id; 696 r->fsf_reqid = fsf_req->req_id;
697 r->fsf_seqno = fsf_req->seq_no; 697 r->fsf_seqno = fsf_req->seq_no;
698 r->s_id = fc_host_port_id(adapter->scsi_host); 698 oct->d_id = d_id;
699 r->d_id = wka_port->d_id; 699 oct->cmd_req_code = hdr->ct_cmd;
700 oct->cmd_req_code = hdr->cmd_rsp_code; 700 oct->revision = hdr->ct_rev;
701 oct->revision = hdr->revision; 701 oct->gs_type = hdr->ct_fs_type;
702 oct->gs_type = hdr->gs_type; 702 oct->gs_subtype = hdr->ct_fs_subtype;
703 oct->gs_subtype = hdr->gs_subtype; 703 oct->options = hdr->ct_options;
704 oct->options = hdr->options; 704 oct->max_res_size = hdr->ct_mr_size;
705 oct->max_res_size = hdr->max_res_size; 705 oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr),
706 oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
707 ZFCP_DBF_SAN_MAX_PAYLOAD); 706 ZFCP_DBF_SAN_MAX_PAYLOAD);
708 debug_event(dbf->san, level, r, sizeof(*r)); 707 debug_event(dbf->san, level, r, sizeof(*r));
709 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, 708 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
710 (void *)hdr + sizeof(struct ct_hdr), oct->len); 709 (void *)hdr + sizeof(struct fc_ct_hdr), oct->len);
711 spin_unlock_irqrestore(&dbf->san_lock, flags); 710 spin_unlock_irqrestore(&dbf->san_lock, flags);
712} 711}
713 712
@@ -717,10 +716,9 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
717 */ 716 */
718void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) 717void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
719{ 718{
720 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 719 struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
721 struct zfcp_wka_port *wka_port = ct->wka_port; 720 struct zfcp_adapter *adapter = fsf_req->adapter;
722 struct zfcp_adapter *adapter = wka_port->adapter; 721 struct fc_ct_hdr *hdr = sg_virt(ct->resp);
723 struct ct_hdr *hdr = sg_virt(ct->resp);
724 struct zfcp_dbf *dbf = adapter->dbf; 722 struct zfcp_dbf *dbf = adapter->dbf;
725 struct zfcp_dbf_san_record *r = &dbf->san_buf; 723 struct zfcp_dbf_san_record *r = &dbf->san_buf;
726 struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp; 724 struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
@@ -732,25 +730,23 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
732 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); 730 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
733 r->fsf_reqid = fsf_req->req_id; 731 r->fsf_reqid = fsf_req->req_id;
734 r->fsf_seqno = fsf_req->seq_no; 732 r->fsf_seqno = fsf_req->seq_no;
735 r->s_id = wka_port->d_id; 733 rct->cmd_rsp_code = hdr->ct_cmd;
736 r->d_id = fc_host_port_id(adapter->scsi_host); 734 rct->revision = hdr->ct_rev;
737 rct->cmd_rsp_code = hdr->cmd_rsp_code; 735 rct->reason_code = hdr->ct_reason;
738 rct->revision = hdr->revision; 736 rct->expl = hdr->ct_explan;
739 rct->reason_code = hdr->reason_code; 737 rct->vendor_unique = hdr->ct_vendor;
740 rct->expl = hdr->reason_code_expl; 738 rct->max_res_size = hdr->ct_mr_size;
741 rct->vendor_unique = hdr->vendor_unique; 739 rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr),
742 rct->max_res_size = hdr->max_res_size;
743 rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
744 ZFCP_DBF_SAN_MAX_PAYLOAD); 740 ZFCP_DBF_SAN_MAX_PAYLOAD);
745 debug_event(dbf->san, level, r, sizeof(*r)); 741 debug_event(dbf->san, level, r, sizeof(*r));
746 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, 742 zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
747 (void *)hdr + sizeof(struct ct_hdr), rct->len); 743 (void *)hdr + sizeof(struct fc_ct_hdr), rct->len);
748 spin_unlock_irqrestore(&dbf->san_lock, flags); 744 spin_unlock_irqrestore(&dbf->san_lock, flags);
749} 745}
750 746
751static void zfcp_dbf_san_els(const char *tag, int level, 747static void zfcp_dbf_san_els(const char *tag, int level,
752 struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, 748 struct zfcp_fsf_req *fsf_req, u32 d_id,
753 u8 ls_code, void *buffer, int buflen) 749 void *buffer, int buflen)
754{ 750{
755 struct zfcp_adapter *adapter = fsf_req->adapter; 751 struct zfcp_adapter *adapter = fsf_req->adapter;
756 struct zfcp_dbf *dbf = adapter->dbf; 752 struct zfcp_dbf *dbf = adapter->dbf;
@@ -762,9 +758,7 @@ static void zfcp_dbf_san_els(const char *tag, int level,
762 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 758 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
763 rec->fsf_reqid = fsf_req->req_id; 759 rec->fsf_reqid = fsf_req->req_id;
764 rec->fsf_seqno = fsf_req->seq_no; 760 rec->fsf_seqno = fsf_req->seq_no;
765 rec->s_id = s_id; 761 rec->u.els.d_id = d_id;
766 rec->d_id = d_id;
767 rec->u.els.ls_code = ls_code;
768 debug_event(dbf->san, level, rec, sizeof(*rec)); 762 debug_event(dbf->san, level, rec, sizeof(*rec));
769 zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, 763 zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level,
770 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); 764 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
@@ -777,12 +771,11 @@ static void zfcp_dbf_san_els(const char *tag, int level,
777 */ 771 */
778void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) 772void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
779{ 773{
780 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 774 struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
775 u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
781 776
782 zfcp_dbf_san_els("oels", 2, fsf_req, 777 zfcp_dbf_san_els("oels", 2, fsf_req, d_id,
783 fc_host_port_id(els->adapter->scsi_host), 778 sg_virt(els->req), els->req->length);
784 els->d_id, *(u8 *) sg_virt(els->req),
785 sg_virt(els->req), els->req->length);
786} 779}
787 780
788/** 781/**
@@ -791,12 +784,11 @@ void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
791 */ 784 */
792void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) 785void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
793{ 786{
794 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 787 struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
788 u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
795 789
796 zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id, 790 zfcp_dbf_san_els("rels", 2, fsf_req, d_id,
797 fc_host_port_id(els->adapter->scsi_host), 791 sg_virt(els->resp), els->resp->length);
798 *(u8 *)sg_virt(els->req), sg_virt(els->resp),
799 els->resp->length);
800} 792}
801 793
802/** 794/**
@@ -805,16 +797,13 @@ void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
805 */ 797 */
806void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) 798void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req)
807{ 799{
808 struct zfcp_adapter *adapter = fsf_req->adapter;
809 struct fsf_status_read_buffer *buf = 800 struct fsf_status_read_buffer *buf =
810 (struct fsf_status_read_buffer *)fsf_req->data; 801 (struct fsf_status_read_buffer *)fsf_req->data;
811 int length = (int)buf->length - 802 int length = (int)buf->length -
812 (int)((void *)&buf->payload - (void *)buf); 803 (int)((void *)&buf->payload - (void *)buf);
813 804
814 zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id, 805 zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id),
815 fc_host_port_id(adapter->scsi_host), 806 (void *)buf->payload.data, length);
816 buf->payload.data[0], (void *)buf->payload.data,
817 length);
818} 807}
819 808
820static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, 809static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
@@ -829,11 +818,10 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
829 zfcp_dbf_tag(&p, "tag", r->tag); 818 zfcp_dbf_tag(&p, "tag", r->tag);
830 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); 819 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
831 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); 820 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
832 zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id);
833 zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
834 821
835 if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { 822 if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
836 struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req; 823 struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
824 zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id);
837 zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); 825 zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
838 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); 826 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
839 zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); 827 zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
@@ -852,7 +840,7 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
852 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || 840 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
853 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { 841 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
854 struct zfcp_dbf_san_record_els *els = &r->u.els; 842 struct zfcp_dbf_san_record_els *els = &r->u.els;
855 zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); 843 zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id);
856 } 844 }
857 return p - out_buf; 845 return p - out_buf;
858} 846}
@@ -870,8 +858,9 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
870 struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; 858 struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf;
871 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 859 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
872 unsigned long flags; 860 unsigned long flags;
873 struct fcp_rsp_iu *fcp_rsp; 861 struct fcp_resp_with_ext *fcp_rsp;
874 char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; 862 struct fcp_resp_rsp_info *fcp_rsp_info = NULL;
863 char *fcp_sns_info = NULL;
875 int offset = 0, buflen = 0; 864 int offset = 0, buflen = 0;
876 865
877 spin_lock_irqsave(&dbf->scsi_lock, flags); 866 spin_lock_irqsave(&dbf->scsi_lock, flags);
@@ -895,20 +884,22 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
895 rec->scsi_allowed = scsi_cmnd->allowed; 884 rec->scsi_allowed = scsi_cmnd->allowed;
896 } 885 }
897 if (fsf_req != NULL) { 886 if (fsf_req != NULL) {
898 fcp_rsp = (struct fcp_rsp_iu *) 887 fcp_rsp = (struct fcp_resp_with_ext *)
899 &(fsf_req->qtcb->bottom.io.fcp_rsp); 888 &(fsf_req->qtcb->bottom.io.fcp_rsp);
900 fcp_rsp_info = (unsigned char *) &fcp_rsp[1]; 889 fcp_rsp_info = (struct fcp_resp_rsp_info *)
901 fcp_sns_info = 890 &fcp_rsp[1];
902 zfcp_get_fcp_sns_info_ptr(fcp_rsp); 891 fcp_sns_info = (char *) &fcp_rsp[1];
903 892 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
904	rec->rsp_validity = fcp_rsp->validity.value;		 893			fcp_sns_info += fcp_rsp->ext.fr_rsp_len;
905 rec->rsp_scsi_status = fcp_rsp->scsi_status; 894
906 rec->rsp_resid = fcp_rsp->fcp_resid; 895 rec->rsp_validity = fcp_rsp->resp.fr_flags;
907 if (fcp_rsp->validity.bits.fcp_rsp_len_valid) 896 rec->rsp_scsi_status = fcp_rsp->resp.fr_status;
908 rec->rsp_code = *(fcp_rsp_info + 3); 897 rec->rsp_resid = fcp_rsp->ext.fr_resid;
909 if (fcp_rsp->validity.bits.fcp_sns_len_valid) { 898 if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
910 buflen = min((int)fcp_rsp->fcp_sns_len, 899 rec->rsp_code = fcp_rsp_info->rsp_code;
911 ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO); 900 if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
901 buflen = min(fcp_rsp->ext.fr_sns_len,
902 (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
912 rec->sns_info_len = buflen; 903 rec->sns_info_len = buflen;
913 memcpy(rec->sns_info, fcp_sns_info, 904 memcpy(rec->sns_info, fcp_sns_info,
914 min(buflen, 905 min(buflen,
@@ -1067,6 +1058,8 @@ err_out:
1067 */ 1058 */
1068void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) 1059void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf)
1069{ 1060{
1061 if (!dbf)
1062 return;
1070 debug_unregister(dbf->scsi); 1063 debug_unregister(dbf->scsi);
1071 debug_unregister(dbf->san); 1064 debug_unregister(dbf->san);
1072 debug_unregister(dbf->hba); 1065 debug_unregister(dbf->hba);
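A minimal sketch (not part of the commit) of how the common <scsi/fc/fc_fcp.h> layout locates the variable parts behind an FCP_RSP IU, mirroring the reworked trace code above; ntoh24(), used for the 3-byte wire D_ID, is likewise common code. The helper name is hypothetical:

#include <scsi/fc/fc_fcp.h>

/* Hypothetical helper: return the sense-data pointer behind an FCP_RSP
 * IU and its length, using the common definitions this patch adopts. */
static char *sketch_fcp_sense_ptr(struct fcp_resp_with_ext *rsp, u32 *sns_len)
{
	char *sense = (char *)&rsp[1];	/* payload after the fixed part */

	/* when response info is present, sense data follows it */
	if (rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
		sense += rsp->ext.fr_rsp_len;

	*sns_len = (rsp->resp.fr_flags & FCP_SNS_LEN_VAL) ?
			rsp->ext.fr_sns_len : 0;
	return sense;
}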
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 6b1461e8f847..457e046f2d28 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -22,6 +22,7 @@
22#ifndef ZFCP_DBF_H 22#ifndef ZFCP_DBF_H
23#define ZFCP_DBF_H 23#define ZFCP_DBF_H
24 24
25#include <scsi/fc/fc_fcp.h>
25#include "zfcp_ext.h" 26#include "zfcp_ext.h"
26#include "zfcp_fsf.h" 27#include "zfcp_fsf.h"
27#include "zfcp_def.h" 28#include "zfcp_def.h"
@@ -29,6 +30,8 @@
29#define ZFCP_DBF_TAG_SIZE 4 30#define ZFCP_DBF_TAG_SIZE 4
30#define ZFCP_DBF_ID_SIZE 7 31#define ZFCP_DBF_ID_SIZE 7
31 32
33#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
34
32struct zfcp_dbf_dump { 35struct zfcp_dbf_dump {
33 u8 tag[ZFCP_DBF_TAG_SIZE]; 36 u8 tag[ZFCP_DBF_TAG_SIZE];
34 u32 total_size; /* size of total dump data */ 37 u32 total_size; /* size of total dump data */
@@ -122,7 +125,6 @@ struct zfcp_dbf_hba_record_response {
122 } unit; 125 } unit;
123 struct { 126 struct {
124 u32 d_id; 127 u32 d_id;
125 u8 ls_code;
126 } els; 128 } els;
127 } u; 129 } u;
128} __attribute__ ((packed)); 130} __attribute__ ((packed));
@@ -166,6 +168,7 @@ struct zfcp_dbf_san_record_ct_request {
166 u8 options; 168 u8 options;
167 u16 max_res_size; 169 u16 max_res_size;
168 u32 len; 170 u32 len;
171 u32 d_id;
169} __attribute__ ((packed)); 172} __attribute__ ((packed));
170 173
171struct zfcp_dbf_san_record_ct_response { 174struct zfcp_dbf_san_record_ct_response {
@@ -179,25 +182,22 @@ struct zfcp_dbf_san_record_ct_response {
179} __attribute__ ((packed)); 182} __attribute__ ((packed));
180 183
181struct zfcp_dbf_san_record_els { 184struct zfcp_dbf_san_record_els {
182 u8 ls_code; 185 u32 d_id;
183 u32 len;
184} __attribute__ ((packed)); 186} __attribute__ ((packed));
185 187
186struct zfcp_dbf_san_record { 188struct zfcp_dbf_san_record {
187 u8 tag[ZFCP_DBF_TAG_SIZE]; 189 u8 tag[ZFCP_DBF_TAG_SIZE];
188 u64 fsf_reqid; 190 u64 fsf_reqid;
189 u32 fsf_seqno; 191 u32 fsf_seqno;
190 u32 s_id;
191 u32 d_id;
192 union { 192 union {
193 struct zfcp_dbf_san_record_ct_request ct_req; 193 struct zfcp_dbf_san_record_ct_request ct_req;
194 struct zfcp_dbf_san_record_ct_response ct_resp; 194 struct zfcp_dbf_san_record_ct_response ct_resp;
195 struct zfcp_dbf_san_record_els els; 195 struct zfcp_dbf_san_record_els els;
196 } u; 196 } u;
197#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
198 u8 payload[32];
199} __attribute__ ((packed)); 197} __attribute__ ((packed));
200 198
199#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
200
201struct zfcp_dbf_scsi_record { 201struct zfcp_dbf_scsi_record {
202 u8 tag[ZFCP_DBF_TAG_SIZE]; 202 u8 tag[ZFCP_DBF_TAG_SIZE];
203 u8 tag2[ZFCP_DBF_TAG_SIZE]; 203 u8 tag2[ZFCP_DBF_TAG_SIZE];
@@ -303,17 +303,31 @@ void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
303 303
304/** 304/**
305 * zfcp_dbf_scsi_result - trace event for SCSI command completion 305 * zfcp_dbf_scsi_result - trace event for SCSI command completion
306 * @tag: tag indicating success or failure of SCSI command 306 * @dbf: adapter dbf trace
307 * @level: trace level applicable for this event 307 * @scmd: SCSI command pointer
308 * @adapter: adapter that has been used to issue the SCSI command 308 * @req: FSF request used to issue SCSI command
309 */
310static inline
311void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
312 struct zfcp_fsf_req *req)
313{
314 if (scmd->result != 0)
315 zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
316 else if (scmd->retries > 0)
317 zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
318 else
319 zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
320}
321
322/**
323 * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
324 * @dbf: adapter dbf trace
309 * @scmd: SCSI command pointer 325 * @scmd: SCSI command pointer
310 * @fsf_req: request used to issue SCSI command (might be NULL)
311 */ 326 */
312static inline 327static inline
313void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf, 328void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
314 struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
315{ 329{
316 zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0); 330 zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
317} 331}
318 332
319/** 333/**
@@ -343,7 +357,7 @@ static inline
343void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 357void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
344 struct scsi_cmnd *scsi_cmnd) 358 struct scsi_cmnd *scsi_cmnd)
345{ 359{
346 zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, 360 zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
347 unit->port->adapter->dbf, scsi_cmnd, NULL, 0); 361 unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
348} 362}
349 363
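A hedged caller-side sketch (not part of the commit) of the reworked trace helpers above, where the old generic result wrapper is split into zfcp_dbf_scsi_result() and zfcp_dbf_scsi_fail_send(); the function name and the surrounding completion path are assumptions:

#include "zfcp_dbf.h"

/* Hypothetical completion-path helper: pick the right trace wrapper
 * depending on whether the command ever made it onto the wire. */
static void sketch_trace_completion(struct zfcp_adapter *adapter,
				    struct scsi_cmnd *scmd,
				    struct zfcp_fsf_req *req)
{
	if (req)	/* completed: trace level derives from result/retries */
		zfcp_dbf_scsi_result(adapter->dbf, scmd, req);
	else		/* never sent */
		zfcp_dbf_scsi_fail_send(adapter->dbf, scmd);
}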
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7da2fad8f515..7131c7db1f04 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Global definitions for the zfcp device driver. 4 * Global definitions for the zfcp device driver.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#ifndef ZFCP_DEF_H 9#ifndef ZFCP_DEF_H
@@ -33,15 +33,13 @@
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_bsg_fc.h> 34#include <scsi/scsi_bsg_fc.h>
35#include <asm/ccwdev.h> 35#include <asm/ccwdev.h>
36#include <asm/qdio.h>
37#include <asm/debug.h> 36#include <asm/debug.h>
38#include <asm/ebcdic.h> 37#include <asm/ebcdic.h>
39#include <asm/sysinfo.h> 38#include <asm/sysinfo.h>
40#include "zfcp_fsf.h" 39#include "zfcp_fsf.h"
40#include "zfcp_qdio.h"
41 41
42/********************* GENERAL DEFINES *********************************/ 42struct zfcp_reqlist;
43
44#define REQUEST_LIST_SIZE 128
45 43
46/********************* SCSI SPECIFIC DEFINES *********************************/ 44/********************* SCSI SPECIFIC DEFINES *********************************/
47#define ZFCP_SCSI_ER_TIMEOUT (10*HZ) 45#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
@@ -71,131 +69,6 @@
71/* timeout value for "default timer" for fsf requests */ 69/* timeout value for "default timer" for fsf requests */
72#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ) 70#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
73 71
74/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
75
76/* task attribute values in FCP-2 FCP_CMND IU */
77#define SIMPLE_Q 0
78#define HEAD_OF_Q 1
79#define ORDERED_Q 2
80#define ACA_Q 4
81#define UNTAGGED 5
82
83/* task management flags in FCP-2 FCP_CMND IU */
84#define FCP_CLEAR_ACA 0x40
85#define FCP_TARGET_RESET 0x20
86#define FCP_LOGICAL_UNIT_RESET 0x10
87#define FCP_CLEAR_TASK_SET 0x04
88#define FCP_ABORT_TASK_SET 0x02
89
90#define FCP_CDB_LENGTH 16
91
92#define ZFCP_DID_MASK 0x00FFFFFF
93
94/* FCP(-2) FCP_CMND IU */
95struct fcp_cmnd_iu {
96 u64 fcp_lun; /* FCP logical unit number */
97 u8 crn; /* command reference number */
98 u8 reserved0:5; /* reserved */
99 u8 task_attribute:3; /* task attribute */
100 u8 task_management_flags; /* task management flags */
101 u8 add_fcp_cdb_length:6; /* additional FCP_CDB length */
102 u8 rddata:1; /* read data */
103 u8 wddata:1; /* write data */
104 u8 fcp_cdb[FCP_CDB_LENGTH];
105} __attribute__((packed));
106
107/* FCP(-2) FCP_RSP IU */
108struct fcp_rsp_iu {
109 u8 reserved0[10];
110 union {
111 struct {
112 u8 reserved1:3;
113 u8 fcp_conf_req:1;
114 u8 fcp_resid_under:1;
115 u8 fcp_resid_over:1;
116 u8 fcp_sns_len_valid:1;
117 u8 fcp_rsp_len_valid:1;
118 } bits;
119 u8 value;
120 } validity;
121 u8 scsi_status;
122 u32 fcp_resid;
123 u32 fcp_sns_len;
124 u32 fcp_rsp_len;
125} __attribute__((packed));
126
127
128#define RSP_CODE_GOOD 0
129#define RSP_CODE_LENGTH_MISMATCH 1
130#define RSP_CODE_FIELD_INVALID 2
131#define RSP_CODE_RO_MISMATCH 3
132#define RSP_CODE_TASKMAN_UNSUPP 4
133#define RSP_CODE_TASKMAN_FAILED 5
134
135/* see fc-fs */
136#define LS_RSCN 0x61
137#define LS_LOGO 0x05
138#define LS_PLOGI 0x03
139
140struct fcp_rscn_head {
141 u8 command;
142 u8 page_length; /* always 0x04 */
143 u16 payload_len;
144} __attribute__((packed));
145
146struct fcp_rscn_element {
147 u8 reserved:2;
148 u8 event_qual:4;
149 u8 addr_format:2;
150 u32 nport_did:24;
151} __attribute__((packed));
152
153/* see fc-ph */
154struct fcp_logo {
155 u32 command;
156 u32 nport_did;
157 u64 nport_wwpn;
158} __attribute__((packed));
159
160/*
161 * FC-FS stuff
162 */
163#define R_A_TOV 10 /* seconds */
164
165#define ZFCP_LS_RLS 0x0f
166#define ZFCP_LS_ADISC 0x52
167#define ZFCP_LS_RPS 0x56
168#define ZFCP_LS_RSCN 0x61
169#define ZFCP_LS_RNID 0x78
170
171struct zfcp_ls_adisc {
172 u8 code;
173 u8 field[3];
174 u32 hard_nport_id;
175 u64 wwpn;
176 u64 wwnn;
177 u32 nport_id;
178} __attribute__ ((packed));
179
180/*
181 * FC-GS-2 stuff
182 */
183#define ZFCP_CT_REVISION 0x01
184#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
185#define ZFCP_CT_NAME_SERVER 0x02
186#define ZFCP_CT_SYNCHRONOUS 0x00
187#define ZFCP_CT_SCSI_FCP 0x08
188#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
189#define ZFCP_CT_GID_PN 0x0121
190#define ZFCP_CT_GPN_FT 0x0172
191#define ZFCP_CT_ACCEPT 0x8002
192#define ZFCP_CT_REJECT 0x8001
193
194/*
195 * FC-GS-4 stuff
196 */
197#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
198
199/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/ 72/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
200 73
201/* 74/*
@@ -205,7 +78,6 @@ struct zfcp_ls_adisc {
205#define ZFCP_COMMON_FLAGS 0xfff00000 78#define ZFCP_COMMON_FLAGS 0xfff00000
206 79
207/* common status bits */ 80/* common status bits */
208#define ZFCP_STATUS_COMMON_REMOVE 0x80000000
209#define ZFCP_STATUS_COMMON_RUNNING 0x40000000 81#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
210#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000 82#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
211#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000 83#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
@@ -222,21 +94,10 @@ struct zfcp_ls_adisc {
222#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 94#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
223#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 95#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
224 96
225/* FC-PH/FC-GS well-known address identifiers for generic services */
226#define ZFCP_DID_WKA 0xFFFFF0
227
228/* remote port status */ 97/* remote port status */
229#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 98#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
230#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 99#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
231 100
232/* well known address (WKA) port status*/
233enum zfcp_wka_status {
234 ZFCP_WKA_PORT_OFFLINE,
235 ZFCP_WKA_PORT_CLOSING,
236 ZFCP_WKA_PORT_OPENING,
237 ZFCP_WKA_PORT_ONLINE,
238};
239
240/* logical unit status */ 101/* logical unit status */
241#define ZFCP_STATUS_UNIT_SHARED 0x00000004 102#define ZFCP_STATUS_UNIT_SHARED 0x00000004
242#define ZFCP_STATUS_UNIT_READONLY 0x00000008 103#define ZFCP_STATUS_UNIT_READONLY 0x00000008
@@ -247,10 +108,7 @@ enum zfcp_wka_status {
247#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 108#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
248#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 109#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
249#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080 110#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
250#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
251#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200 111#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
252#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400
253#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
254#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000 112#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
255 113
256/************************* STRUCTURE DEFINITIONS *****************************/ 114/************************* STRUCTURE DEFINITIONS *****************************/
@@ -265,131 +123,10 @@ struct zfcp_adapter_mempool {
265 mempool_t *scsi_abort; 123 mempool_t *scsi_abort;
266 mempool_t *status_read_req; 124 mempool_t *status_read_req;
267 mempool_t *status_read_data; 125 mempool_t *status_read_data;
268 mempool_t *gid_pn_data; 126 mempool_t *gid_pn;
269 mempool_t *qtcb_pool; 127 mempool_t *qtcb_pool;
270}; 128};
271 129
272/*
273 * header for CT_IU
274 */
275struct ct_hdr {
276 u8 revision; // 0x01
277 u8 in_id[3]; // 0x00
278 u8 gs_type; // 0xFC Directory Service
279 u8 gs_subtype; // 0x02 Name Server
280 u8 options; // 0x00 single bidirectional exchange
281 u8 reserved0;
282 u16 cmd_rsp_code; // 0x0121 GID_PN, or 0x0100 GA_NXT
283 u16 max_res_size; // <= (4096 - 16) / 4
284 u8 reserved1;
285 u8 reason_code;
286 u8 reason_code_expl;
287 u8 vendor_unique;
288} __attribute__ ((packed));
289
290/* nameserver request CT_IU -- for requests where
291 * a port name is required */
292struct ct_iu_gid_pn_req {
293 struct ct_hdr header;
294 u64 wwpn;
295} __attribute__ ((packed));
296
297/* FS_ACC IU and data unit for GID_PN nameserver request */
298struct ct_iu_gid_pn_resp {
299 struct ct_hdr header;
300 u32 d_id;
301} __attribute__ ((packed));
302
303struct ct_iu_gpn_ft_req {
304 struct ct_hdr header;
305 u8 flags;
306 u8 domain_id_scope;
307 u8 area_id_scope;
308 u8 fc4_type;
309} __attribute__ ((packed));
310
311
312/**
313 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
314 * @wka_port: port where the request is sent to
315 * @req: scatter-gather list for request
316 * @resp: scatter-gather list for response
317 * @handler: handler function (called for response to the request)
318 * @handler_data: data passed to handler function
319 * @completion: completion for synchronization purposes
320 * @status: used to pass error status to calling function
321 */
322struct zfcp_send_ct {
323 struct zfcp_wka_port *wka_port;
324 struct scatterlist *req;
325 struct scatterlist *resp;
326 void (*handler)(unsigned long);
327 unsigned long handler_data;
328 struct completion *completion;
329 int status;
330};
331
332/* used for name server requests in error recovery */
333struct zfcp_gid_pn_data {
334 struct zfcp_send_ct ct;
335 struct scatterlist req;
336 struct scatterlist resp;
337 struct ct_iu_gid_pn_req ct_iu_req;
338 struct ct_iu_gid_pn_resp ct_iu_resp;
339 struct zfcp_port *port;
340};
341
342/**
343 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
344 * @adapter: adapter where request is sent from
345 * @port: port where ELS is destined (port reference count has to be increased)
346 * @d_id: destination id of port where request is sent to
347 * @req: scatter-gather list for request
348 * @resp: scatter-gather list for response
349 * @handler: handler function (called for response to the request)
350 * @handler_data: data passed to handler function
351 * @completion: completion for synchronization purposes
352 * @ls_code: hex code of ELS command
353 * @status: used to pass error status to calling function
354 */
355struct zfcp_send_els {
356 struct zfcp_adapter *adapter;
357 struct zfcp_port *port;
358 u32 d_id;
359 struct scatterlist *req;
360 struct scatterlist *resp;
361 void (*handler)(unsigned long);
362 unsigned long handler_data;
363 struct completion *completion;
364 int ls_code;
365 int status;
366};
367
368struct zfcp_wka_port {
369 struct zfcp_adapter *adapter;
370 wait_queue_head_t completion_wq;
371 enum zfcp_wka_status status;
372 atomic_t refcount;
373 u32 d_id;
374 u32 handle;
375 struct mutex mutex;
376 struct delayed_work work;
377};
378
379struct zfcp_wka_ports {
380 struct zfcp_wka_port ms; /* management service */
381 struct zfcp_wka_port ts; /* time service */
382 struct zfcp_wka_port ds; /* directory service */
383 struct zfcp_wka_port as; /* alias service */
384 struct zfcp_wka_port ks; /* key distribution service */
385};
386
387struct zfcp_qdio_queue {
388 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
389 u8 first; /* index of next free bfr in queue */
390 atomic_t count; /* number of free buffers in queue */
391};
392
393struct zfcp_erp_action { 130struct zfcp_erp_action {
394 struct list_head list; 131 struct list_head list;
395 int action; /* requested action code */ 132 int action; /* requested action code */
@@ -398,8 +135,7 @@ struct zfcp_erp_action {
398 struct zfcp_unit *unit; 135 struct zfcp_unit *unit;
399 u32 status; /* recovery status */ 136 u32 status; /* recovery status */
400 u32 step; /* active step of this erp action */ 137 u32 step; /* active step of this erp action */
401 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending 138 unsigned long fsf_req_id;
402 for this action */
403 struct timer_list timer; 139 struct timer_list timer;
404}; 140};
405 141
@@ -422,33 +158,8 @@ struct zfcp_latencies {
422 spinlock_t lock; 158 spinlock_t lock;
423}; 159};
424 160
425/** struct zfcp_qdio - basic QDIO data structure
426 * @resp_q: response queue
427 * @req_q: request queue
428 * @stat_lock: lock to protect req_q_util and req_q_time
429 * @req_q_lock: lock to serialize access to request queue
430 * @req_q_time: time of last fill level change
431 * @req_q_util: used for accounting
432 * @req_q_full: queue full incidents
433 * @req_q_wq: used to wait for SBAL availability
434 * @adapter: adapter used in conjunction with this QDIO structure
435 */
436struct zfcp_qdio {
437 struct zfcp_qdio_queue resp_q;
438 struct zfcp_qdio_queue req_q;
439 spinlock_t stat_lock;
440 spinlock_t req_q_lock;
441 unsigned long long req_q_time;
442 u64 req_q_util;
443 atomic_t req_q_full;
444 wait_queue_head_t req_q_wq;
445 struct zfcp_adapter *adapter;
446};
447
448struct zfcp_adapter { 161struct zfcp_adapter {
449 atomic_t refcount; /* reference count */ 162 struct kref ref;
450 wait_queue_head_t remove_wq; /* can be used to wait for
451 refcount drop to zero */
452 u64 peer_wwnn; /* P2P peer WWNN */ 163 u64 peer_wwnn; /* P2P peer WWNN */
453 u64 peer_wwpn; /* P2P peer WWPN */ 164 u64 peer_wwpn; /* P2P peer WWPN */
454 u32 peer_d_id; /* P2P peer D_ID */ 165 u32 peer_d_id; /* P2P peer D_ID */
@@ -461,10 +172,10 @@ struct zfcp_adapter {
461 u32 hardware_version; /* of FCP channel */ 172 u32 hardware_version; /* of FCP channel */
462 u16 timer_ticks; /* time int for a tick */ 173 u16 timer_ticks; /* time int for a tick */
463 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 174 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
464 struct list_head port_list_head; /* remote port list */ 175 struct list_head port_list; /* remote port list */
176 rwlock_t port_list_lock; /* port list lock */
465 unsigned long req_no; /* unique FSF req number */ 177 unsigned long req_no; /* unique FSF req number */
466 struct list_head *req_list; /* list of pending reqs */ 178 struct zfcp_reqlist *req_list;
467 spinlock_t req_list_lock; /* request list lock */
468 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 179 u32 fsf_req_seq_no; /* FSF cmnd seq number */
469 rwlock_t abort_lock; /* Protects against SCSI 180 rwlock_t abort_lock; /* Protects against SCSI
470 stack abort/command 181 stack abort/command
@@ -485,7 +196,7 @@ struct zfcp_adapter {
485 u32 erp_low_mem_count; /* nr of erp actions waiting 196 u32 erp_low_mem_count; /* nr of erp actions waiting
486 for memory */ 197 for memory */
487 struct task_struct *erp_thread; 198 struct task_struct *erp_thread;
488 struct zfcp_wka_ports *gs; /* generic services */ 199 struct zfcp_fc_wka_ports *gs; /* generic services */
489 struct zfcp_dbf *dbf; /* debug traces */ 200 struct zfcp_dbf *dbf; /* debug traces */
490 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 201 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
491 struct fc_host_statistics *fc_stats; 202 struct fc_host_statistics *fc_stats;
@@ -497,14 +208,12 @@ struct zfcp_adapter {
497}; 208};
498 209
499struct zfcp_port { 210struct zfcp_port {
500 struct device sysfs_device; /* sysfs device */ 211 struct device dev;
501 struct fc_rport *rport; /* rport of fc transport class */ 212 struct fc_rport *rport; /* rport of fc transport class */
502 struct list_head list; /* list of remote ports */ 213 struct list_head list; /* list of remote ports */
503 atomic_t refcount; /* reference count */
504 wait_queue_head_t remove_wq; /* can be used to wait for
505 refcount drop to zero */
506 struct zfcp_adapter *adapter; /* adapter used to access port */ 214 struct zfcp_adapter *adapter; /* adapter used to access port */
507 struct list_head unit_list_head; /* head of logical unit list */ 215 struct list_head unit_list; /* head of logical unit list */
216 rwlock_t unit_list_lock; /* unit list lock */
508 atomic_t status; /* status of this remote port */ 217 atomic_t status; /* status of this remote port */
509 u64 wwnn; /* WWNN if known */ 218 u64 wwnn; /* WWNN if known */
510 u64 wwpn; /* WWPN */ 219 u64 wwpn; /* WWPN */
@@ -521,11 +230,8 @@ struct zfcp_port {
521}; 230};
522 231
523struct zfcp_unit { 232struct zfcp_unit {
524 struct device sysfs_device; /* sysfs device */ 233 struct device dev;
525 struct list_head list; /* list of logical units */ 234 struct list_head list; /* list of logical units */
526 atomic_t refcount; /* reference count */
527 wait_queue_head_t remove_wq; /* can be used to wait for
528 refcount drop to zero */
529 struct zfcp_port *port; /* remote port of unit */ 235 struct zfcp_port *port; /* remote port of unit */
530 atomic_t status; /* status of this logical unit */ 236 atomic_t status; /* status of this logical unit */
531 u64 fcp_lun; /* own FCP_LUN */ 237 u64 fcp_lun; /* own FCP_LUN */
@@ -538,33 +244,11 @@ struct zfcp_unit {
538}; 244};
539 245
540/** 246/**
541 * struct zfcp_queue_req - queue related values for a request
542 * @sbal_number: number of free SBALs
543 * @sbal_first: first SBAL for this request
544 * @sbal_last: last SBAL for this request
545 * @sbal_limit: last possible SBAL for this request
546 * @sbale_curr: current SBALE at creation of this request
547 * @sbal_response: SBAL used in interrupt
548 * @qdio_outb_usage: usage of outbound queue
549 * @qdio_inb_usage: usage of inbound queue
550 */
551struct zfcp_queue_req {
552 u8 sbal_number;
553 u8 sbal_first;
554 u8 sbal_last;
555 u8 sbal_limit;
556 u8 sbale_curr;
557 u8 sbal_response;
558 u16 qdio_outb_usage;
559 u16 qdio_inb_usage;
560};
561
562/**
563 * struct zfcp_fsf_req - basic FSF request structure 247 * struct zfcp_fsf_req - basic FSF request structure
564 * @list: list of FSF requests 248 * @list: list of FSF requests
565 * @req_id: unique request ID 249 * @req_id: unique request ID
566 * @adapter: adapter this request belongs to 250 * @adapter: adapter this request belongs to
567 * @queue_req: queue related values 251 * @qdio_req: qdio queue related values
568 * @completion: used to signal the completion of the request 252 * @completion: used to signal the completion of the request
569 * @status: status of the request 253 * @status: status of the request
570 * @fsf_command: FSF command issued 254 * @fsf_command: FSF command issued
@@ -582,7 +266,7 @@ struct zfcp_fsf_req {
582 struct list_head list; 266 struct list_head list;
583 unsigned long req_id; 267 unsigned long req_id;
584 struct zfcp_adapter *adapter; 268 struct zfcp_adapter *adapter;
585 struct zfcp_queue_req queue_req; 269 struct zfcp_qdio_req qdio_req;
586 struct completion completion; 270 struct completion completion;
587 u32 status; 271 u32 status;
588 u32 fsf_command; 272 u32 fsf_command;
@@ -601,14 +285,11 @@ struct zfcp_fsf_req {
601struct zfcp_data { 285struct zfcp_data {
602 struct scsi_host_template scsi_host_template; 286 struct scsi_host_template scsi_host_template;
603 struct scsi_transport_template *scsi_transport_template; 287 struct scsi_transport_template *scsi_transport_template;
604 rwlock_t config_lock; /* serialises changes
605 to adapter/port/unit
606 lists */
607 struct mutex config_mutex;
608 struct kmem_cache *gpn_ft_cache; 288 struct kmem_cache *gpn_ft_cache;
609 struct kmem_cache *qtcb_cache; 289 struct kmem_cache *qtcb_cache;
610 struct kmem_cache *sr_buffer_cache; 290 struct kmem_cache *sr_buffer_cache;
611 struct kmem_cache *gid_pn_cache; 291 struct kmem_cache *gid_pn_cache;
292 struct kmem_cache *adisc_cache;
612}; 293};
613 294
614/********************** ZFCP SPECIFIC DEFINES ********************************/ 295/********************** ZFCP SPECIFIC DEFINES ********************************/
@@ -616,88 +297,4 @@ struct zfcp_data {
616#define ZFCP_SET 0x00000100 297#define ZFCP_SET 0x00000100
617#define ZFCP_CLEAR 0x00000200 298#define ZFCP_CLEAR 0x00000200
618 299
619/*
620 * Helper functions for request ID management.
621 */
622static inline int zfcp_reqlist_hash(unsigned long req_id)
623{
624 return req_id % REQUEST_LIST_SIZE;
625}
626
627static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
628 struct zfcp_fsf_req *fsf_req)
629{
630 list_del(&fsf_req->list);
631}
632
633static inline struct zfcp_fsf_req *
634zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
635{
636 struct zfcp_fsf_req *request;
637 unsigned int idx;
638
639 idx = zfcp_reqlist_hash(req_id);
640 list_for_each_entry(request, &adapter->req_list[idx], list)
641 if (request->req_id == req_id)
642 return request;
643 return NULL;
644}
645
646static inline struct zfcp_fsf_req *
647zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
648{
649 struct zfcp_fsf_req *request;
650 unsigned int idx;
651
652 for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
653 list_for_each_entry(request, &adapter->req_list[idx], list)
654 if (request == req)
655 return request;
656 }
657 return NULL;
658}
659
660/*
661 * functions needed for reference/usage counting
662 */
663
664static inline void
665zfcp_unit_get(struct zfcp_unit *unit)
666{
667 atomic_inc(&unit->refcount);
668}
669
670static inline void
671zfcp_unit_put(struct zfcp_unit *unit)
672{
673 if (atomic_dec_return(&unit->refcount) == 0)
674 wake_up(&unit->remove_wq);
675}
676
677static inline void
678zfcp_port_get(struct zfcp_port *port)
679{
680 atomic_inc(&port->refcount);
681}
682
683static inline void
684zfcp_port_put(struct zfcp_port *port)
685{
686 if (atomic_dec_return(&port->refcount) == 0)
687 wake_up(&port->remove_wq);
688}
689
690static inline void
691zfcp_adapter_get(struct zfcp_adapter *adapter)
692{
693 atomic_inc(&adapter->refcount);
694}
695
696static inline void
697zfcp_adapter_put(struct zfcp_adapter *adapter)
698{
699 if (atomic_dec_return(&adapter->refcount) == 0)
700 wake_up(&adapter->remove_wq);
701}
702
703#endif /* ZFCP_DEF_H */ 300#endif /* ZFCP_DEF_H */
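A minimal sketch (not part of the commit) of the lifetime pattern the new struct fields imply: struct kref plus a release callback replaces the hand-rolled refcount/remove_wq pairs removed above. The helper name and the work done while pinned are assumptions:

#include <linux/kref.h>
#include "zfcp_def.h"

/* Hypothetical user: pin the adapter across blocking work that may
 * race with removal, then drop the reference through the release
 * callback declared in zfcp_ext.h. */
static void sketch_use_adapter(struct zfcp_adapter *adapter)
{
	kref_get(&adapter->ref);
	/* ... blocking work, e.g. an ERP step ... */
	kref_put(&adapter->ref, zfcp_adapter_release);
}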
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index f73e2180f333..0be5e7ea2828 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Error Recovery Procedures (ERP). 4 * Error Recovery Procedures (ERP).
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -11,6 +11,7 @@
11 11
12#include <linux/kthread.h> 12#include <linux/kthread.h>
13#include "zfcp_ext.h" 13#include "zfcp_ext.h"
14#include "zfcp_reqlist.h"
14 15
15#define ZFCP_MAX_ERPS 3 16#define ZFCP_MAX_ERPS 3
16 17
@@ -99,9 +100,12 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
99 100
100 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 101 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
101 zfcp_erp_action_dismiss(&port->erp_action); 102 zfcp_erp_action_dismiss(&port->erp_action);
102 else 103 else {
103 list_for_each_entry(unit, &port->unit_list_head, list) 104 read_lock(&port->unit_list_lock);
104 zfcp_erp_action_dismiss_unit(unit); 105 list_for_each_entry(unit, &port->unit_list, list)
106 zfcp_erp_action_dismiss_unit(unit);
107 read_unlock(&port->unit_list_lock);
108 }
105} 109}
106 110
107static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -110,9 +114,12 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
110 114
111 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 115 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
112 zfcp_erp_action_dismiss(&adapter->erp_action); 116 zfcp_erp_action_dismiss(&adapter->erp_action);
113 else 117 else {
114 list_for_each_entry(port, &adapter->port_list_head, list) 118 read_lock(&adapter->port_list_lock);
119 list_for_each_entry(port, &adapter->port_list, list)
115 zfcp_erp_action_dismiss_port(port); 120 zfcp_erp_action_dismiss_port(port);
121 read_unlock(&adapter->port_list_lock);
122 }
116} 123}
117 124
118static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, 125static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
@@ -168,7 +175,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
168 175
169 switch (need) { 176 switch (need) {
170 case ZFCP_ERP_ACTION_REOPEN_UNIT: 177 case ZFCP_ERP_ACTION_REOPEN_UNIT:
171 zfcp_unit_get(unit); 178 if (!get_device(&unit->dev))
179 return NULL;
172 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); 180 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
173 erp_action = &unit->erp_action; 181 erp_action = &unit->erp_action;
174 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) 182 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
@@ -177,7 +185,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
177 185
178 case ZFCP_ERP_ACTION_REOPEN_PORT: 186 case ZFCP_ERP_ACTION_REOPEN_PORT:
179 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 187 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
180 zfcp_port_get(port); 188 if (!get_device(&port->dev))
189 return NULL;
181 zfcp_erp_action_dismiss_port(port); 190 zfcp_erp_action_dismiss_port(port);
182 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 191 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
183 erp_action = &port->erp_action; 192 erp_action = &port->erp_action;
@@ -186,7 +195,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
186 break; 195 break;
187 196
188 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 197 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
189 zfcp_adapter_get(adapter); 198 kref_get(&adapter->ref);
190 zfcp_erp_action_dismiss_adapter(adapter); 199 zfcp_erp_action_dismiss_adapter(adapter);
191 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 200 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
192 erp_action = &adapter->erp_action; 201 erp_action = &adapter->erp_action;
@@ -264,11 +273,16 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
264{ 273{
265 unsigned long flags; 274 unsigned long flags;
266 275
267 read_lock_irqsave(&zfcp_data.config_lock, flags); 276 zfcp_erp_adapter_block(adapter, clear);
268 write_lock(&adapter->erp_lock); 277 zfcp_scsi_schedule_rports_block(adapter);
269 _zfcp_erp_adapter_reopen(adapter, clear, id, ref); 278
270 write_unlock(&adapter->erp_lock); 279 write_lock_irqsave(&adapter->erp_lock, flags);
271 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 280 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
281 zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
282 else
283 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
284 NULL, NULL, id, ref);
285 write_unlock_irqrestore(&adapter->erp_lock, flags);
272} 286}
273 287
274/** 288/**
@@ -345,11 +359,9 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
345 unsigned long flags; 359 unsigned long flags;
346 struct zfcp_adapter *adapter = port->adapter; 360 struct zfcp_adapter *adapter = port->adapter;
347 361
348 read_lock_irqsave(&zfcp_data.config_lock, flags); 362 write_lock_irqsave(&adapter->erp_lock, flags);
349 write_lock(&adapter->erp_lock);
350 _zfcp_erp_port_forced_reopen(port, clear, id, ref); 363 _zfcp_erp_port_forced_reopen(port, clear, id, ref);
351 write_unlock(&adapter->erp_lock); 364 write_unlock_irqrestore(&adapter->erp_lock, flags);
352 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
353} 365}
354 366
355static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, 367static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
@@ -377,15 +389,13 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
377 */ 389 */
378int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) 390int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
379{ 391{
380 unsigned long flags;
381 int retval; 392 int retval;
393 unsigned long flags;
382 struct zfcp_adapter *adapter = port->adapter; 394 struct zfcp_adapter *adapter = port->adapter;
383 395
384 read_lock_irqsave(&zfcp_data.config_lock, flags); 396 write_lock_irqsave(&adapter->erp_lock, flags);
385 write_lock(&adapter->erp_lock);
386 retval = _zfcp_erp_port_reopen(port, clear, id, ref); 397 retval = _zfcp_erp_port_reopen(port, clear, id, ref);
387 write_unlock(&adapter->erp_lock); 398 write_unlock_irqrestore(&adapter->erp_lock, flags);
388 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
389 399
390 return retval; 400 return retval;
391} 401}
@@ -424,11 +434,9 @@ void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
424 struct zfcp_port *port = unit->port; 434 struct zfcp_port *port = unit->port;
425 struct zfcp_adapter *adapter = port->adapter; 435 struct zfcp_adapter *adapter = port->adapter;
426 436
427 read_lock_irqsave(&zfcp_data.config_lock, flags); 437 write_lock_irqsave(&adapter->erp_lock, flags);
428 write_lock(&adapter->erp_lock);
429 _zfcp_erp_unit_reopen(unit, clear, id, ref); 438 _zfcp_erp_unit_reopen(unit, clear, id, ref);
430 write_unlock(&adapter->erp_lock); 439 write_unlock_irqrestore(&adapter->erp_lock, flags);
431 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
432} 440}
433 441
434static int status_change_set(unsigned long mask, atomic_t *status) 442static int status_change_set(unsigned long mask, atomic_t *status)
@@ -471,26 +479,27 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
471static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) 479static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
472{ 480{
473 struct zfcp_adapter *adapter = act->adapter; 481 struct zfcp_adapter *adapter = act->adapter;
482 struct zfcp_fsf_req *req;
474 483
475 if (!act->fsf_req) 484 if (!act->fsf_req_id)
476 return; 485 return;
477 486
478 spin_lock(&adapter->req_list_lock); 487 spin_lock(&adapter->req_list->lock);
479 if (zfcp_reqlist_find_safe(adapter, act->fsf_req) && 488 req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
480 act->fsf_req->erp_action == act) { 489 if (req && req->erp_action == act) {
481 if (act->status & (ZFCP_STATUS_ERP_DISMISSED | 490 if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
482 ZFCP_STATUS_ERP_TIMEDOUT)) { 491 ZFCP_STATUS_ERP_TIMEDOUT)) {
483 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 492 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
484 zfcp_dbf_rec_action("erscf_1", act); 493 zfcp_dbf_rec_action("erscf_1", act);
485 act->fsf_req->erp_action = NULL; 494 req->erp_action = NULL;
486 } 495 }
487 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 496 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
488 zfcp_dbf_rec_action("erscf_2", act); 497 zfcp_dbf_rec_action("erscf_2", act);
489 if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) 498 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
490 act->fsf_req = NULL; 499 act->fsf_req_id = 0;
491 } else 500 } else
492 act->fsf_req = NULL; 501 act->fsf_req_id = 0;
493 spin_unlock(&adapter->req_list_lock); 502 spin_unlock(&adapter->req_list->lock);
494} 503}
495 504
496/** 505/**
@@ -540,8 +549,10 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
540{ 549{
541 struct zfcp_port *port; 550 struct zfcp_port *port;
542 551
543 list_for_each_entry(port, &adapter->port_list_head, list) 552 read_lock(&adapter->port_list_lock);
553 list_for_each_entry(port, &adapter->port_list, list)
544 _zfcp_erp_port_reopen(port, clear, id, ref); 554 _zfcp_erp_port_reopen(port, clear, id, ref);
555 read_unlock(&adapter->port_list_lock);
545} 556}
546 557
547static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, 558static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
@@ -549,8 +560,10 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
549{ 560{
550 struct zfcp_unit *unit; 561 struct zfcp_unit *unit;
551 562
552 list_for_each_entry(unit, &port->unit_list_head, list) 563 read_lock(&port->unit_list_lock);
564 list_for_each_entry(unit, &port->unit_list, list)
553 _zfcp_erp_unit_reopen(unit, clear, id, ref); 565 _zfcp_erp_unit_reopen(unit, clear, id, ref);
566 read_unlock(&port->unit_list_lock);
554} 567}
555 568
556static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 569static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -590,16 +603,14 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
590{ 603{
591 unsigned long flags; 604 unsigned long flags;
592 605
593 read_lock_irqsave(&zfcp_data.config_lock, flags); 606 read_lock_irqsave(&adapter->erp_lock, flags);
594 read_lock(&adapter->erp_lock);
595 if (list_empty(&adapter->erp_ready_head) && 607 if (list_empty(&adapter->erp_ready_head) &&
596 list_empty(&adapter->erp_running_head)) { 608 list_empty(&adapter->erp_running_head)) {
597 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 609 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
598 &adapter->status); 610 &adapter->status);
599 wake_up(&adapter->erp_done_wqh); 611 wake_up(&adapter->erp_done_wqh);
600 } 612 }
601 read_unlock(&adapter->erp_lock); 613 read_unlock_irqrestore(&adapter->erp_lock, flags);
602 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
603} 614}
604 615
605static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) 616static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
@@ -1170,28 +1181,28 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1170 switch (act->action) { 1181 switch (act->action) {
1171 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1182 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1172 if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { 1183 if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
1173 zfcp_unit_get(unit); 1184 get_device(&unit->dev);
1174 if (scsi_queue_work(unit->port->adapter->scsi_host, 1185 if (scsi_queue_work(unit->port->adapter->scsi_host,
1175 &unit->scsi_work) <= 0) 1186 &unit->scsi_work) <= 0)
1176 zfcp_unit_put(unit); 1187 put_device(&unit->dev);
1177 } 1188 }
1178 zfcp_unit_put(unit); 1189 put_device(&unit->dev);
1179 break; 1190 break;
1180 1191
1181 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1192 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1182 case ZFCP_ERP_ACTION_REOPEN_PORT: 1193 case ZFCP_ERP_ACTION_REOPEN_PORT:
1183 if (result == ZFCP_ERP_SUCCEEDED) 1194 if (result == ZFCP_ERP_SUCCEEDED)
1184 zfcp_scsi_schedule_rport_register(port); 1195 zfcp_scsi_schedule_rport_register(port);
1185 zfcp_port_put(port); 1196 put_device(&port->dev);
1186 break; 1197 break;
1187 1198
1188 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1199 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1189 if (result == ZFCP_ERP_SUCCEEDED) { 1200 if (result == ZFCP_ERP_SUCCEEDED) {
1190 register_service_level(&adapter->service_level); 1201 register_service_level(&adapter->service_level);
1191 schedule_work(&adapter->scan_work); 1202 queue_work(adapter->work_queue, &adapter->scan_work);
1192 } else 1203 } else
1193 unregister_service_level(&adapter->service_level); 1204 unregister_service_level(&adapter->service_level);
1194 zfcp_adapter_put(adapter); 1205 kref_put(&adapter->ref, zfcp_adapter_release);
1195 break; 1206 break;
1196 } 1207 }
1197} 1208}
@@ -1214,12 +1225,12 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1214static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) 1225static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1215{ 1226{
1216 int retval; 1227 int retval;
1217 struct zfcp_adapter *adapter = erp_action->adapter;
1218 unsigned long flags; 1228 unsigned long flags;
1229 struct zfcp_adapter *adapter = erp_action->adapter;
1219 1230
1220 read_lock_irqsave(&zfcp_data.config_lock, flags); 1231 kref_get(&adapter->ref);
1221 write_lock(&adapter->erp_lock);
1222 1232
1233 write_lock_irqsave(&adapter->erp_lock, flags);
1223 zfcp_erp_strategy_check_fsfreq(erp_action); 1234 zfcp_erp_strategy_check_fsfreq(erp_action);
1224 1235
1225 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) { 1236 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
@@ -1231,11 +1242,9 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1231 zfcp_erp_action_to_running(erp_action); 1242 zfcp_erp_action_to_running(erp_action);
1232 1243
1233 /* no lock to allow for blocking operations */ 1244 /* no lock to allow for blocking operations */
1234 write_unlock(&adapter->erp_lock); 1245 write_unlock_irqrestore(&adapter->erp_lock, flags);
1235 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1236 retval = zfcp_erp_strategy_do_action(erp_action); 1246 retval = zfcp_erp_strategy_do_action(erp_action);
1237 read_lock_irqsave(&zfcp_data.config_lock, flags); 1247 write_lock_irqsave(&adapter->erp_lock, flags);
1238 write_lock(&adapter->erp_lock);
1239 1248
1240 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) 1249 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
1241 retval = ZFCP_ERP_CONTINUES; 1250 retval = ZFCP_ERP_CONTINUES;
@@ -1273,12 +1282,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1273 zfcp_erp_strategy_followup_failed(erp_action); 1282 zfcp_erp_strategy_followup_failed(erp_action);
1274 1283
1275 unlock: 1284 unlock:
1276 write_unlock(&adapter->erp_lock); 1285 write_unlock_irqrestore(&adapter->erp_lock, flags);
1277 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1278 1286
1279 if (retval != ZFCP_ERP_CONTINUES) 1287 if (retval != ZFCP_ERP_CONTINUES)
1280 zfcp_erp_action_cleanup(erp_action, retval); 1288 zfcp_erp_action_cleanup(erp_action, retval);
1281 1289
1290 kref_put(&adapter->ref, zfcp_adapter_release);
1282 return retval; 1291 return retval;
1283} 1292}
1284 1293
@@ -1415,6 +1424,7 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
1415 void *ref, u32 mask, int set_or_clear) 1424 void *ref, u32 mask, int set_or_clear)
1416{ 1425{
1417 struct zfcp_port *port; 1426 struct zfcp_port *port;
1427 unsigned long flags;
1418 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1428 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1419 1429
1420 if (set_or_clear == ZFCP_SET) { 1430 if (set_or_clear == ZFCP_SET) {
@@ -1429,10 +1439,13 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
1429 atomic_set(&adapter->erp_counter, 0); 1439 atomic_set(&adapter->erp_counter, 0);
1430 } 1440 }
1431 1441
1432 if (common_mask) 1442 if (common_mask) {
1433 list_for_each_entry(port, &adapter->port_list_head, list) 1443 read_lock_irqsave(&adapter->port_list_lock, flags);
1444 list_for_each_entry(port, &adapter->port_list, list)
1434 zfcp_erp_modify_port_status(port, id, ref, common_mask, 1445 zfcp_erp_modify_port_status(port, id, ref, common_mask,
1435 set_or_clear); 1446 set_or_clear);
1447 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1448 }
1436} 1449}
1437 1450
1438/** 1451/**
@@ -1449,6 +1462,7 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
1449 u32 mask, int set_or_clear) 1462 u32 mask, int set_or_clear)
1450{ 1463{
1451 struct zfcp_unit *unit; 1464 struct zfcp_unit *unit;
1465 unsigned long flags;
1452 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1466 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1453 1467
1454 if (set_or_clear == ZFCP_SET) { 1468 if (set_or_clear == ZFCP_SET) {
@@ -1463,10 +1477,13 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
1463 atomic_set(&port->erp_counter, 0); 1477 atomic_set(&port->erp_counter, 0);
1464 } 1478 }
1465 1479
1466 if (common_mask) 1480 if (common_mask) {
1467 list_for_each_entry(unit, &port->unit_list_head, list) 1481 read_lock_irqsave(&port->unit_list_lock, flags);
1482 list_for_each_entry(unit, &port->unit_list, list)
1468 zfcp_erp_modify_unit_status(unit, id, ref, common_mask, 1483 zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
1469 set_or_clear); 1484 set_or_clear);
1485 read_unlock_irqrestore(&port->unit_list_lock, flags);
1486 }
1470} 1487}
1471 1488
1472/** 1489/**
@@ -1502,12 +1519,8 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
1502 */ 1519 */
1503void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) 1520void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
1504{ 1521{
1505 unsigned long flags;
1506
1507 read_lock_irqsave(&zfcp_data.config_lock, flags);
1508 zfcp_erp_modify_port_status(port, id, ref, 1522 zfcp_erp_modify_port_status(port, id, ref,
1509 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); 1523 ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
1510 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1511 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1524 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
1512} 1525}
1513 1526
@@ -1535,13 +1548,9 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
1535 */ 1548 */
1536void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) 1549void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
1537{ 1550{
1538 unsigned long flags;
1539
1540 read_lock_irqsave(&zfcp_data.config_lock, flags);
1541 zfcp_erp_modify_port_status(port, id, ref, 1551 zfcp_erp_modify_port_status(port, id, ref,
1542 ZFCP_STATUS_COMMON_ERP_FAILED | 1552 ZFCP_STATUS_COMMON_ERP_FAILED |
1543 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1553 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
1544 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1545} 1554}
1546 1555
1547/** 1556/**
@@ -1574,12 +1583,15 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
1574 void *ref) 1583 void *ref)
1575{ 1584{
1576 struct zfcp_unit *unit; 1585 struct zfcp_unit *unit;
1586 unsigned long flags;
1577 int status = atomic_read(&port->status); 1587 int status = atomic_read(&port->status);
1578 1588
1579 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | 1589 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1580 ZFCP_STATUS_COMMON_ACCESS_BOXED))) { 1590 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1581 list_for_each_entry(unit, &port->unit_list_head, list) 1591 read_lock_irqsave(&port->unit_list_lock, flags);
1592 list_for_each_entry(unit, &port->unit_list, list)
1582 zfcp_erp_unit_access_changed(unit, id, ref); 1593 zfcp_erp_unit_access_changed(unit, id, ref);
1594 read_unlock_irqrestore(&port->unit_list_lock, flags);
1583 return; 1595 return;
1584 } 1596 }
1585 1597
@@ -1595,14 +1607,14 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
1595void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, 1607void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
1596 void *ref) 1608 void *ref)
1597{ 1609{
1598 struct zfcp_port *port;
1599 unsigned long flags; 1610 unsigned long flags;
1611 struct zfcp_port *port;
1600 1612
1601 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 1613 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
1602 return; 1614 return;
1603 1615
1604 read_lock_irqsave(&zfcp_data.config_lock, flags); 1616 read_lock_irqsave(&adapter->port_list_lock, flags);
1605 list_for_each_entry(port, &adapter->port_list_head, list) 1617 list_for_each_entry(port, &adapter->port_list, list)
1606 zfcp_erp_port_access_changed(port, id, ref); 1618 zfcp_erp_port_access_changed(port, id, ref);
1607 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 1619 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1608} 1620}
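A minimal sketch (not part of the commit) of the traversal pattern the ERP changes above rely on: the per-adapter rwlock only pins the list, while get_device() pins an entry that must outlive the lock. The lookup helper itself is hypothetical:

#include "zfcp_def.h"

/* Hypothetical lookup: find a remote port by WWPN and return it
 * pinned; the caller releases it with put_device(&port->dev). */
static struct zfcp_port *sketch_port_lookup(struct zfcp_adapter *adapter,
					    u64 wwpn)
{
	struct zfcp_port *port, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->wwpn == wwpn) {
			if (get_device(&port->dev))
				found = port;
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
	return found;
}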
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b3f28deb4505..8786a79c7f8f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -9,26 +9,30 @@
9#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
10#define ZFCP_EXT_H 10#define ZFCP_EXT_H
11 11
12#include <linux/types.h>
13#include <scsi/fc/fc_els.h>
12#include "zfcp_def.h" 14#include "zfcp_def.h"
15#include "zfcp_fc.h"
13 16
14/* zfcp_aux.c */ 17/* zfcp_aux.c */
15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); 18extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
16extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); 19extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
17extern int zfcp_adapter_enqueue(struct ccw_device *); 20extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
18extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
19extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, 21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
20 u32); 22 u32);
21extern void zfcp_port_dequeue(struct zfcp_port *);
22extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); 23extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
23extern void zfcp_unit_dequeue(struct zfcp_unit *);
24extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
25extern void zfcp_sg_free_table(struct scatterlist *, int); 24extern void zfcp_sg_free_table(struct scatterlist *, int);
26extern int zfcp_sg_setup_table(struct scatterlist *, int); 25extern int zfcp_sg_setup_table(struct scatterlist *, int);
26extern void zfcp_device_unregister(struct device *,
27 const struct attribute_group *);
28extern void zfcp_adapter_release(struct kref *);
29extern void zfcp_adapter_unregister(struct zfcp_adapter *);
27 30
28/* zfcp_ccw.c */ 31/* zfcp_ccw.c */
29extern int zfcp_ccw_register(void);
30extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); 32extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
31extern struct ccw_driver zfcp_ccw_driver; 33extern struct ccw_driver zfcp_ccw_driver;
34extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
35extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
32 36
33/* zfcp_cfdc.c */ 37/* zfcp_cfdc.c */
34extern struct miscdevice zfcp_cfdc_misc; 38extern struct miscdevice zfcp_cfdc_misc;
@@ -51,7 +55,7 @@ extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
51 struct fsf_status_read_buffer *); 55 struct fsf_status_read_buffer *);
52extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); 56extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
53extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); 57extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
54extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *); 58extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32);
55extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); 59extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
56extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); 60extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
57extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); 61extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
@@ -92,24 +96,23 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
92extern void zfcp_erp_timeout_handler(unsigned long); 96extern void zfcp_erp_timeout_handler(unsigned long);
93 97
94/* zfcp_fc.c */ 98/* zfcp_fc.c */
95extern int zfcp_fc_scan_ports(struct zfcp_adapter *); 99extern void zfcp_fc_scan_ports(struct work_struct *);
96extern void _zfcp_fc_scan_ports_later(struct work_struct *);
97extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 100extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
98extern void zfcp_fc_port_did_lookup(struct work_struct *); 101extern void zfcp_fc_port_did_lookup(struct work_struct *);
99extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *); 102extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
100extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 103extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
101extern void zfcp_fc_test_link(struct zfcp_port *); 104extern void zfcp_fc_test_link(struct zfcp_port *);
102extern void zfcp_fc_link_test_work(struct work_struct *); 105extern void zfcp_fc_link_test_work(struct work_struct *);
103extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); 106extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
104extern int zfcp_fc_gs_setup(struct zfcp_adapter *); 107extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
105extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); 108extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
106extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); 109extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
107extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); 110extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
108 111
109/* zfcp_fsf.c */ 112/* zfcp_fsf.c */
110extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 113extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
111extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *); 114extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
112extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *); 115extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
113extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 116extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
114extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 117extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
115extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 118extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -125,8 +128,10 @@ extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
125extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); 128extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
126extern int zfcp_fsf_status_read(struct zfcp_qdio *); 129extern int zfcp_fsf_status_read(struct zfcp_qdio *);
127extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); 130extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
128extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *); 131extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
129extern int zfcp_fsf_send_els(struct zfcp_send_els *); 132 mempool_t *, unsigned int);
133extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
134 struct zfcp_fsf_ct_els *, unsigned int);
130extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 135extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
131 struct scsi_cmnd *); 136 struct scsi_cmnd *);
132extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 137extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
@@ -138,13 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
138/* zfcp_qdio.c */ 143/* zfcp_qdio.c */
139extern int zfcp_qdio_setup(struct zfcp_adapter *); 144extern int zfcp_qdio_setup(struct zfcp_adapter *);
140extern void zfcp_qdio_destroy(struct zfcp_qdio *); 145extern void zfcp_qdio_destroy(struct zfcp_qdio *);
141extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *); 146extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
142extern struct qdio_buffer_element
143 *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
144extern struct qdio_buffer_element
145 *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
146extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, 147extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
147 struct zfcp_queue_req *, unsigned long, 148 struct zfcp_qdio_req *, unsigned long,
148 struct scatterlist *, int); 149 struct scatterlist *, int);
149extern int zfcp_qdio_open(struct zfcp_qdio *); 150extern int zfcp_qdio_open(struct zfcp_qdio *);
150extern void zfcp_qdio_close(struct zfcp_qdio *); 151extern void zfcp_qdio_close(struct zfcp_qdio *);
@@ -153,7 +154,6 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
153extern struct zfcp_data zfcp_data; 154extern struct zfcp_data zfcp_data;
154extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); 155extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
155extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 156extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
156extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
157extern struct fc_function_template zfcp_transport_functions; 157extern struct fc_function_template zfcp_transport_functions;
158extern void zfcp_scsi_rport_work(struct work_struct *); 158extern void zfcp_scsi_rport_work(struct work_struct *);
159extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); 159extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index df23bcead23d..2a1cbb74b99b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,79 +3,45 @@
3 * 3 *
4 * Fibre Channel related functions for the zfcp device driver. 4 * Fibre Channel related functions for the zfcp device driver.
5 * 5 *
6 * Copyright IBM Corporation 2008, 2009 6 * Copyright IBM Corporation 2008, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/types.h>
13#include <linux/slab.h>
14#include <scsi/fc/fc_els.h>
15#include <scsi/libfc.h>
12#include "zfcp_ext.h" 16#include "zfcp_ext.h"
17#include "zfcp_fc.h"
13 18
14enum rscn_address_format { 19static u32 zfcp_fc_rscn_range_mask[] = {
15 RSCN_PORT_ADDRESS = 0x0, 20 [ELS_ADDR_FMT_PORT] = 0xFFFFFF,
16 RSCN_AREA_ADDRESS = 0x1, 21 [ELS_ADDR_FMT_AREA] = 0xFFFF00,
17 RSCN_DOMAIN_ADDRESS = 0x2, 22 [ELS_ADDR_FMT_DOM] = 0xFF0000,
18 RSCN_FABRIC_ADDRESS = 0x3, 23 [ELS_ADDR_FMT_FAB] = 0x000000,
19}; 24};
20 25
21static u32 rscn_range_mask[] = { 26static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
22 [RSCN_PORT_ADDRESS] = 0xFFFFFF,
23 [RSCN_AREA_ADDRESS] = 0xFFFF00,
24 [RSCN_DOMAIN_ADDRESS] = 0xFF0000,
25 [RSCN_FABRIC_ADDRESS] = 0x000000,
26};
27
28struct gpn_ft_resp_acc {
29 u8 control;
30 u8 port_id[3];
31 u8 reserved[4];
32 u64 wwpn;
33} __attribute__ ((packed));
34
35#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr))
36#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \
37 / sizeof(struct gpn_ft_resp_acc))
38#define ZFCP_GPN_FT_BUFFERS 4
39#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
40 - sizeof(struct ct_hdr))
41#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
42
43struct ct_iu_gpn_ft_resp {
44 struct ct_hdr header;
45 struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
46} __attribute__ ((packed));
47
48struct zfcp_gpn_ft {
49 struct zfcp_send_ct ct;
50 struct scatterlist sg_req;
51 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
52};
53
54struct zfcp_fc_ns_handler_data {
55 struct completion done;
56 void (*handler)(unsigned long);
57 unsigned long handler_data;
58};
59
60static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
61{ 27{
62 if (mutex_lock_interruptible(&wka_port->mutex)) 28 if (mutex_lock_interruptible(&wka_port->mutex))
63 return -ERESTARTSYS; 29 return -ERESTARTSYS;
64 30
65 if (wka_port->status == ZFCP_WKA_PORT_OFFLINE || 31 if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
66 wka_port->status == ZFCP_WKA_PORT_CLOSING) { 32 wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
67 wka_port->status = ZFCP_WKA_PORT_OPENING; 33 wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
68 if (zfcp_fsf_open_wka_port(wka_port)) 34 if (zfcp_fsf_open_wka_port(wka_port))
69 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 35 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
70 } 36 }
71 37
72 mutex_unlock(&wka_port->mutex); 38 mutex_unlock(&wka_port->mutex);
73 39
74 wait_event(wka_port->completion_wq, 40 wait_event(wka_port->completion_wq,
75 wka_port->status == ZFCP_WKA_PORT_ONLINE || 41 wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
76 wka_port->status == ZFCP_WKA_PORT_OFFLINE); 42 wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
77 43
78 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { 44 if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
79 atomic_inc(&wka_port->refcount); 45 atomic_inc(&wka_port->refcount);
80 return 0; 46 return 0;
81 } 47 }
@@ -85,24 +51,24 @@ static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
85static void zfcp_fc_wka_port_offline(struct work_struct *work) 51static void zfcp_fc_wka_port_offline(struct work_struct *work)
86{ 52{
87 struct delayed_work *dw = to_delayed_work(work); 53 struct delayed_work *dw = to_delayed_work(work);
88 struct zfcp_wka_port *wka_port = 54 struct zfcp_fc_wka_port *wka_port =
89 container_of(dw, struct zfcp_wka_port, work); 55 container_of(dw, struct zfcp_fc_wka_port, work);
90 56
91 mutex_lock(&wka_port->mutex); 57 mutex_lock(&wka_port->mutex);
92 if ((atomic_read(&wka_port->refcount) != 0) || 58 if ((atomic_read(&wka_port->refcount) != 0) ||
93 (wka_port->status != ZFCP_WKA_PORT_ONLINE)) 59 (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
94 goto out; 60 goto out;
95 61
96 wka_port->status = ZFCP_WKA_PORT_CLOSING; 62 wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
97 if (zfcp_fsf_close_wka_port(wka_port)) { 63 if (zfcp_fsf_close_wka_port(wka_port)) {
98 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 64 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
99 wake_up(&wka_port->completion_wq); 65 wake_up(&wka_port->completion_wq);
100 } 66 }
101out: 67out:
102 mutex_unlock(&wka_port->mutex); 68 mutex_unlock(&wka_port->mutex);
103} 69}
104 70
105static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port) 71static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
106{ 72{
107 if (atomic_dec_return(&wka_port->refcount) != 0) 73 if (atomic_dec_return(&wka_port->refcount) != 0)
108 return; 74 return;
@@ -110,7 +76,7 @@ static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
110 schedule_delayed_work(&wka_port->work, HZ / 100); 76 schedule_delayed_work(&wka_port->work, HZ / 100);
111} 77}
112 78
113static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, 79static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
114 struct zfcp_adapter *adapter) 80 struct zfcp_adapter *adapter)
115{ 81{
116 init_waitqueue_head(&wka_port->completion_wq); 82 init_waitqueue_head(&wka_port->completion_wq);
@@ -118,107 +84,107 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
118 wka_port->adapter = adapter; 84 wka_port->adapter = adapter;
119 wka_port->d_id = d_id; 85 wka_port->d_id = d_id;
120 86
121 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 87 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
122 atomic_set(&wka_port->refcount, 0); 88 atomic_set(&wka_port->refcount, 0);
123 mutex_init(&wka_port->mutex); 89 mutex_init(&wka_port->mutex);
124 INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline); 90 INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
125} 91}
126 92
127static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) 93static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
128{ 94{
129 cancel_delayed_work_sync(&wka->work); 95 cancel_delayed_work_sync(&wka->work);
130 mutex_lock(&wka->mutex); 96 mutex_lock(&wka->mutex);
131 wka->status = ZFCP_WKA_PORT_OFFLINE; 97 wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
132 mutex_unlock(&wka->mutex); 98 mutex_unlock(&wka->mutex);
133} 99}
134 100
135void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) 101void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
136{ 102{
103 if (!gs)
104 return;
137 zfcp_fc_wka_port_force_offline(&gs->ms); 105 zfcp_fc_wka_port_force_offline(&gs->ms);
138 zfcp_fc_wka_port_force_offline(&gs->ts); 106 zfcp_fc_wka_port_force_offline(&gs->ts);
139 zfcp_fc_wka_port_force_offline(&gs->ds); 107 zfcp_fc_wka_port_force_offline(&gs->ds);
140 zfcp_fc_wka_port_force_offline(&gs->as); 108 zfcp_fc_wka_port_force_offline(&gs->as);
141 zfcp_fc_wka_port_force_offline(&gs->ks);
142} 109}
143 110
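The open/close protocol above is a get/put reference pattern: the first zfcp_fc_wka_port_get() triggers the FSF open, and once the last user drops its reference, zfcp_fc_wka_port_put() schedules a slightly delayed close so back-to-back name-server requests can reuse the still-open port. A minimal userspace sketch of that shape, with C11 atomics standing in for the kernel's atomic_t, mutex and delayed work (all names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

/* hypothetical stand-in for the refcount logic of struct zfcp_fc_wka_port */
struct wka_sketch {
	atomic_int refcount;
	int open;			/* models ZFCP_FC_WKA_PORT_ONLINE */
};

static void wka_get(struct wka_sketch *w)
{
	if (atomic_fetch_add(&w->refcount, 1) == 0 && !w->open)
		w->open = 1;		/* first user triggers the open */
}

static void wka_put(struct wka_sketch *w)
{
	if (atomic_fetch_sub(&w->refcount, 1) == 1)
		/* last user: the driver delays the close (HZ / 100) so a
		 * follow-up request can still reuse the open port */
		printf("schedule delayed close\n");
}

int main(void)
{
	struct wka_sketch w = { .open = 0 };
	wka_get(&w);			/* e.g. around a GID_PN request */
	wka_put(&w);
	return 0;
}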
144static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, 111static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
145 struct fcp_rscn_element *elem) 112 struct fc_els_rscn_page *page)
146{ 113{
147 unsigned long flags; 114 unsigned long flags;
115 struct zfcp_adapter *adapter = fsf_req->adapter;
148 struct zfcp_port *port; 116 struct zfcp_port *port;
149 117
150 read_lock_irqsave(&zfcp_data.config_lock, flags); 118 read_lock_irqsave(&adapter->port_list_lock, flags);
151 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 119 list_for_each_entry(port, &adapter->port_list, list) {
152 if ((port->d_id & range) == (elem->nport_did & range)) 120 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
153 zfcp_fc_test_link(port); 121 zfcp_fc_test_link(port);
154 if (!port->d_id) 122 if (!port->d_id)
155 zfcp_erp_port_reopen(port, 123 zfcp_erp_port_reopen(port,
156 ZFCP_STATUS_COMMON_ERP_FAILED, 124 ZFCP_STATUS_COMMON_ERP_FAILED,
157 "fcrscn1", NULL); 125 "fcrscn1", NULL);
158 } 126 }
159 127 read_unlock_irqrestore(&adapter->port_list_lock, flags);
160 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
161} 128}
162 129
163static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) 130static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
164{ 131{
165 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; 132 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
166 struct fcp_rscn_head *fcp_rscn_head; 133 struct fc_els_rscn *head;
167 struct fcp_rscn_element *fcp_rscn_element; 134 struct fc_els_rscn_page *page;
168 u16 i; 135 u16 i;
169 u16 no_entries; 136 u16 no_entries;
170 u32 range_mask; 137 unsigned int afmt;
171 138
172 fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data; 139 head = (struct fc_els_rscn *) status_buffer->payload.data;
173 fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head; 140 page = (struct fc_els_rscn_page *) head;
174 141
175 /* see FC-FS */ 142 /* see FC-FS */
176 no_entries = fcp_rscn_head->payload_len / 143 no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
177 sizeof(struct fcp_rscn_element);
178 144
179 for (i = 1; i < no_entries; i++) { 145 for (i = 1; i < no_entries; i++) {
180 /* skip head and start with 1st element */ 146 /* skip head and start with 1st element */
181 fcp_rscn_element++; 147 page++;
182 range_mask = rscn_range_mask[fcp_rscn_element->addr_format]; 148 afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
183 _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element); 149 _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
150 page);
184 } 151 }
185 schedule_work(&fsf_req->adapter->scan_work); 152 queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
186} 153}
187 154
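The rewritten handler keys the mask table by the libfc address-format bits of each RSCN page: a port-address RSCN must match all 24 bits of a port's D_ID, an area RSCN the top 16, a domain RSCN the top 8, and a fabric RSCN matches everything. A compilable sketch of that comparison (the mask values are copied from zfcp_fc_rscn_range_mask above; the rest is illustrative):

#include <stdint.h>
#include <stdio.h>

/* same table as zfcp_fc_rscn_range_mask, indexed by ELS_ADDR_FMT_* */
static const uint32_t range_mask[] = { 0xFFFFFF, 0xFFFF00, 0xFF0000, 0x000000 };

static int rscn_hits(uint32_t port_did, uint32_t rscn_fid, unsigned int afmt)
{
	return (port_did & range_mask[afmt]) == (rscn_fid & range_mask[afmt]);
}

int main(void)
{
	/* an area-format RSCN for 0x0a0b00 matches every D_ID in area 0x0a0b */
	printf("%d\n", rscn_hits(0x0a0b1f, 0x0a0b00, 1));	/* prints 1 */
	printf("%d\n", rscn_hits(0x0a0c1f, 0x0a0b00, 1));	/* prints 0 */
	return 0;
}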
188static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) 155static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
189{ 156{
157 unsigned long flags;
190 struct zfcp_adapter *adapter = req->adapter; 158 struct zfcp_adapter *adapter = req->adapter;
191 struct zfcp_port *port; 159 struct zfcp_port *port;
192 unsigned long flags;
193 160
194 read_lock_irqsave(&zfcp_data.config_lock, flags); 161 read_lock_irqsave(&adapter->port_list_lock, flags);
195 list_for_each_entry(port, &adapter->port_list_head, list) 162 list_for_each_entry(port, &adapter->port_list, list)
196 if (port->wwpn == wwpn) 163 if (port->wwpn == wwpn) {
164 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
197 break; 165 break;
198 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 166 }
199 167 read_unlock_irqrestore(&adapter->port_list_lock, flags);
200 if (port && (port->wwpn == wwpn))
201 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
202} 168}
203 169
204static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req) 170static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
205{ 171{
206 struct fsf_status_read_buffer *status_buffer = 172 struct fsf_status_read_buffer *status_buffer;
207 (struct fsf_status_read_buffer *)req->data; 173 struct fc_els_flogi *plogi;
208 struct fsf_plogi *els_plogi =
209 (struct fsf_plogi *) status_buffer->payload.data;
210 174
211 zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn); 175 status_buffer = (struct fsf_status_read_buffer *) req->data;
176 plogi = (struct fc_els_flogi *) status_buffer->payload.data;
177 zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
212} 178}
213 179
214static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req) 180static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
215{ 181{
216 struct fsf_status_read_buffer *status_buffer = 182 struct fsf_status_read_buffer *status_buffer =
217 (struct fsf_status_read_buffer *)req->data; 183 (struct fsf_status_read_buffer *)req->data;
218 struct fcp_logo *els_logo = 184 struct fc_els_logo *logo =
219 (struct fcp_logo *) status_buffer->payload.data; 185 (struct fc_els_logo *) status_buffer->payload.data;
220 186
221 zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn); 187 zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
222} 188}
223 189
224/** 190/**
@@ -232,79 +198,73 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
232 unsigned int els_type = status_buffer->payload.data[0]; 198 unsigned int els_type = status_buffer->payload.data[0];
233 199
234 zfcp_dbf_san_incoming_els(fsf_req); 200 zfcp_dbf_san_incoming_els(fsf_req);
235 if (els_type == LS_PLOGI) 201 if (els_type == ELS_PLOGI)
236 zfcp_fc_incoming_plogi(fsf_req); 202 zfcp_fc_incoming_plogi(fsf_req);
237 else if (els_type == LS_LOGO) 203 else if (els_type == ELS_LOGO)
238 zfcp_fc_incoming_logo(fsf_req); 204 zfcp_fc_incoming_logo(fsf_req);
239 else if (els_type == LS_RSCN) 205 else if (els_type == ELS_RSCN)
240 zfcp_fc_incoming_rscn(fsf_req); 206 zfcp_fc_incoming_rscn(fsf_req);
241} 207}
242 208
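Dispatch of incoming ELS frames keys off the first payload byte, which carries the ELS command code; the patch merely swaps zfcp's private LS_* constants for the shared ELS_* ones from fc_els.h. A sketch of the same dispatch, using the standard command values 0x03/0x05/0x61 for PLOGI/LOGO/RSCN (values assumed from FC-LS, everything else illustrative):

#include <stdint.h>
#include <stdio.h>

enum { PLOGI = 0x03, LOGO = 0x05, RSCN = 0x61 };	/* standard ELS codes */

static void dispatch_els(const uint8_t *payload)
{
	switch (payload[0]) {		/* first byte is the ELS command */
	case PLOGI: puts("handle PLOGI"); break;
	case LOGO:  puts("handle LOGO");  break;
	case RSCN:  puts("handle RSCN");  break;
	default:    puts("ignored");      break;
	}
}

int main(void)
{
	uint8_t buf[1] = { RSCN };
	dispatch_els(buf);		/* prints "handle RSCN" */
	return 0;
}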
243static void zfcp_fc_ns_handler(unsigned long data) 209static void zfcp_fc_ns_gid_pn_eval(void *data)
244{ 210{
245 struct zfcp_fc_ns_handler_data *compl_rec = 211 struct zfcp_fc_gid_pn *gid_pn = data;
246 (struct zfcp_fc_ns_handler_data *) data; 212 struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
247 213 struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
248 if (compl_rec->handler) 214 struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
249 compl_rec->handler(compl_rec->handler_data);
250
251 complete(&compl_rec->done);
252}
253
254static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
255{
256 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
257 struct zfcp_send_ct *ct = &gid_pn->ct;
258 struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
259 struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
260 struct zfcp_port *port = gid_pn->port; 215 struct zfcp_port *port = gid_pn->port;
261 216
262 if (ct->status) 217 if (ct->status)
263 return; 218 return;
264 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) 219 if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
265 return; 220 return;
266 221
267 /* paranoia */ 222 /* paranoia */
268 if (ct_iu_req->wwpn != port->wwpn) 223 if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
269 return; 224 return;
270 /* looks like a valid d_id */ 225 /* looks like a valid d_id */
271 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 226 port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
227}
228
229static void zfcp_fc_complete(void *data)
230{
231 complete(data);
272} 232}
273 233
274static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, 234static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
275 struct zfcp_gid_pn_data *gid_pn) 235 struct zfcp_fc_gid_pn *gid_pn)
276{ 236{
277 struct zfcp_adapter *adapter = port->adapter; 237 struct zfcp_adapter *adapter = port->adapter;
278 struct zfcp_fc_ns_handler_data compl_rec; 238 DECLARE_COMPLETION_ONSTACK(completion);
279 int ret; 239 int ret;
280 240
281 /* setup parameters for send generic command */ 241 /* setup parameters for send generic command */
282 gid_pn->port = port; 242 gid_pn->port = port;
283 gid_pn->ct.wka_port = &adapter->gs->ds; 243 gid_pn->ct.handler = zfcp_fc_complete;
284 gid_pn->ct.handler = zfcp_fc_ns_handler; 244 gid_pn->ct.handler_data = &completion;
285 gid_pn->ct.handler_data = (unsigned long) &compl_rec; 245 gid_pn->ct.req = &gid_pn->sg_req;
286 gid_pn->ct.req = &gid_pn->req; 246 gid_pn->ct.resp = &gid_pn->sg_resp;
287 gid_pn->ct.resp = &gid_pn->resp; 247 sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
288 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, 248 sizeof(struct zfcp_fc_gid_pn_req));
289 sizeof(struct ct_iu_gid_pn_req)); 249 sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
290 sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp, 250 sizeof(struct zfcp_fc_gid_pn_resp));
291 sizeof(struct ct_iu_gid_pn_resp));
292 251
293 /* setup nameserver request */ 252 /* setup nameserver request */
294 gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION; 253 gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
295 gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; 254 gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
296 gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER; 255 gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
297 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; 256 gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
298 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; 257 gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
299 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; 258 gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
300 gid_pn->ct_iu_req.wwpn = port->wwpn; 259 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
301 260
302 init_completion(&compl_rec.done); 261 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
303 compl_rec.handler = zfcp_fc_ns_gid_pn_eval; 262 adapter->pool.gid_pn_req,
304 compl_rec.handler_data = (unsigned long) gid_pn; 263 ZFCP_FC_CTELS_TMO);
305 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req); 264 if (!ret) {
306 if (!ret) 265 wait_for_completion(&completion);
307 wait_for_completion(&compl_rec.done); 266 zfcp_fc_ns_gid_pn_eval(gid_pn);
267 }
308 return ret; 268 return ret;
309} 269}
310 270
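Worth noting is the control-flow simplification here: the old zfcp_fc_ns_handler indirection is gone; the FSF layer now just signals an on-stack completion via zfcp_fc_complete(), and the caller evaluates the response itself once wait_for_completion() returns. A userspace sketch of that synchronous-over-asynchronous shape, with a POSIX semaphore standing in for struct completion (names illustrative; build with -pthread):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static void complete_cb(void *data)	/* plays the role of zfcp_fc_complete */
{
	sem_post(data);
}

static void *fake_hardware(void *cb_data)	/* response arrives later */
{
	complete_cb(cb_data);
	return NULL;
}

int main(void)
{
	sem_t done;			/* plays DECLARE_COMPLETION_ONSTACK */
	pthread_t t;

	sem_init(&done, 0, 0);
	pthread_create(&t, NULL, fake_hardware, &done);	/* "send" the request */
	sem_wait(&done);		/* wait_for_completion() */
	puts("evaluate response here");	/* cf. zfcp_fc_ns_gid_pn_eval() */
	pthread_join(t, NULL);
	sem_destroy(&done);
	return 0;
}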
@@ -316,10 +276,10 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
316static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) 276static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
317{ 277{
318 int ret; 278 int ret;
319 struct zfcp_gid_pn_data *gid_pn; 279 struct zfcp_fc_gid_pn *gid_pn;
320 struct zfcp_adapter *adapter = port->adapter; 280 struct zfcp_adapter *adapter = port->adapter;
321 281
322 gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); 282 gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
323 if (!gid_pn) 283 if (!gid_pn)
324 return -ENOMEM; 284 return -ENOMEM;
325 285
@@ -333,7 +293,7 @@ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
333 293
334 zfcp_fc_wka_port_put(&adapter->gs->ds); 294 zfcp_fc_wka_port_put(&adapter->gs->ds);
335out: 295out:
336 mempool_free(gid_pn, adapter->pool.gid_pn_data); 296 mempool_free(gid_pn, adapter->pool.gid_pn);
337 return ret; 297 return ret;
338} 298}
339 299
@@ -357,7 +317,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
357 317
358 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); 318 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
359out: 319out:
360 zfcp_port_put(port); 320 put_device(&port->dev);
361} 321}
362 322
363/** 323/**
@@ -366,9 +326,9 @@ out:
366 */ 326 */
367void zfcp_fc_trigger_did_lookup(struct zfcp_port *port) 327void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
368{ 328{
369 zfcp_port_get(port); 329 get_device(&port->dev);
370 if (!queue_work(port->adapter->work_queue, &port->gid_pn_work)) 330 if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
371 zfcp_port_put(port); 331 put_device(&port->dev);
372} 332}
373 333
374/** 334/**
@@ -378,33 +338,36 @@ void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
378 * 338 *
379 * Evaluate PLOGI payload and copy important fields into zfcp_port structure 339 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
380 */ 340 */
381void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi) 341void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
382{ 342{
383 port->maxframe_size = plogi->serv_param.common_serv_param[7] | 343 if (plogi->fl_wwpn != port->wwpn) {
384 ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8); 344 port->d_id = 0;
385 if (plogi->serv_param.class1_serv_param[0] & 0x80) 345 dev_warn(&port->adapter->ccw_device->dev,
346 "A port opened with WWPN 0x%016Lx returned data that "
347 "identifies it as WWPN 0x%016Lx\n",
348 (unsigned long long) port->wwpn,
349 (unsigned long long) plogi->fl_wwpn);
350 return;
351 }
352
353 port->wwnn = plogi->fl_wwnn;
354 port->maxframe_size = plogi->fl_csp.sp_bb_data;
355
356 if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
386 port->supported_classes |= FC_COS_CLASS1; 357 port->supported_classes |= FC_COS_CLASS1;
387 if (plogi->serv_param.class2_serv_param[0] & 0x80) 358 if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
388 port->supported_classes |= FC_COS_CLASS2; 359 port->supported_classes |= FC_COS_CLASS2;
389 if (plogi->serv_param.class3_serv_param[0] & 0x80) 360 if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
390 port->supported_classes |= FC_COS_CLASS3; 361 port->supported_classes |= FC_COS_CLASS3;
391 if (plogi->serv_param.class4_serv_param[0] & 0x80) 362 if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
392 port->supported_classes |= FC_COS_CLASS4; 363 port->supported_classes |= FC_COS_CLASS4;
393} 364}
394 365
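With the switch to struct fc_els_flogi the class-of-service checks become explicit: each class-specific service-parameter block carries a validity bit, FC_CPC_VALID, instead of the old magic "byte 0 & 0x80" test on raw arrays. A small standalone version of the same loop (0x8000 is assumed here as the value of FC_CPC_VALID in the big-endian cp_class word; illustrative only):

#include <stdint.h>
#include <stdio.h>

#define CPC_VALID 0x8000	/* assumed value of FC_CPC_VALID (bit 15) */

/* cut-down stand-in for the four fl_cssp[] entries of struct fc_els_flogi */
struct cssp_sketch { uint16_t cp_class; };

int main(void)
{
	struct cssp_sketch cssp[4] = { { 0 }, { CPC_VALID }, { CPC_VALID }, { 0 } };
	unsigned int classes = 0, i;

	for (i = 0; i < 4; i++)
		if (cssp[i].cp_class & CPC_VALID)
			classes |= 1u << i;	/* class i + 1 supported */

	printf("supported class bitmap: 0x%x\n", classes);	/* 0x6: classes 2+3 */
	return 0;
}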
395struct zfcp_els_adisc { 366static void zfcp_fc_adisc_handler(void *data)
396 struct zfcp_send_els els;
397 struct scatterlist req;
398 struct scatterlist resp;
399 struct zfcp_ls_adisc ls_adisc;
400 struct zfcp_ls_adisc ls_adisc_acc;
401};
402
403static void zfcp_fc_adisc_handler(unsigned long data)
404{ 367{
405 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data; 368 struct zfcp_fc_els_adisc *adisc = data;
406 struct zfcp_port *port = adisc->els.port; 369 struct zfcp_port *port = adisc->els.port;
407 struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc; 370 struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
408 371
409 if (adisc->els.status) { 372 if (adisc->els.status) {
410 /* request rejected or timed out */ 373 /* request rejected or timed out */
@@ -414,9 +377,9 @@ static void zfcp_fc_adisc_handler(unsigned long data)
414 } 377 }
415 378
416 if (!port->wwnn) 379 if (!port->wwnn)
417 port->wwnn = ls_adisc->wwnn; 380 port->wwnn = adisc_resp->adisc_wwnn;
418 381
419 if ((port->wwpn != ls_adisc->wwpn) || 382 if ((port->wwpn != adisc_resp->adisc_wwpn) ||
420 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { 383 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
421 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 384 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
422 "fcadh_2", NULL); 385 "fcadh_2", NULL);
@@ -427,40 +390,45 @@ static void zfcp_fc_adisc_handler(unsigned long data)
427 zfcp_scsi_schedule_rport_register(port); 390 zfcp_scsi_schedule_rport_register(port);
428 out: 391 out:
429 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 392 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
430 zfcp_port_put(port); 393 put_device(&port->dev);
431 kfree(adisc); 394 kmem_cache_free(zfcp_data.adisc_cache, adisc);
432} 395}
433 396
434static int zfcp_fc_adisc(struct zfcp_port *port) 397static int zfcp_fc_adisc(struct zfcp_port *port)
435{ 398{
436 struct zfcp_els_adisc *adisc; 399 struct zfcp_fc_els_adisc *adisc;
437 struct zfcp_adapter *adapter = port->adapter; 400 struct zfcp_adapter *adapter = port->adapter;
401 int ret;
438 402
439 adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC); 403 adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
440 if (!adisc) 404 if (!adisc)
441 return -ENOMEM; 405 return -ENOMEM;
442 406
407 adisc->els.port = port;
443 adisc->els.req = &adisc->req; 408 adisc->els.req = &adisc->req;
444 adisc->els.resp = &adisc->resp; 409 adisc->els.resp = &adisc->resp;
445 sg_init_one(adisc->els.req, &adisc->ls_adisc, 410 sg_init_one(adisc->els.req, &adisc->adisc_req,
446 sizeof(struct zfcp_ls_adisc)); 411 sizeof(struct fc_els_adisc));
447 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, 412 sg_init_one(adisc->els.resp, &adisc->adisc_resp,
448 sizeof(struct zfcp_ls_adisc)); 413 sizeof(struct fc_els_adisc));
449 414
450 adisc->els.adapter = adapter;
451 adisc->els.port = port;
452 adisc->els.d_id = port->d_id;
453 adisc->els.handler = zfcp_fc_adisc_handler; 415 adisc->els.handler = zfcp_fc_adisc_handler;
454 adisc->els.handler_data = (unsigned long) adisc; 416 adisc->els.handler_data = adisc;
455 adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
456 417
457 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports 418 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
458 without FC-AL-2 capability, so we don't set it */ 419 without FC-AL-2 capability, so we don't set it */
459 adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host); 420 adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
460 adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host); 421 adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
461 adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host); 422 adisc->adisc_req.adisc_cmd = ELS_ADISC;
423 hton24(adisc->adisc_req.adisc_port_id,
424 fc_host_port_id(adapter->scsi_host));
425
426 ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
427 ZFCP_FC_CTELS_TMO);
428 if (ret)
429 kmem_cache_free(zfcp_data.adisc_cache, adisc);
462 430
463 return zfcp_fsf_send_els(&adisc->els); 431 return ret;
464} 432}
465 433
466void zfcp_fc_link_test_work(struct work_struct *work) 434void zfcp_fc_link_test_work(struct work_struct *work)
@@ -469,7 +437,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
469 container_of(work, struct zfcp_port, test_link_work); 437 container_of(work, struct zfcp_port, test_link_work);
470 int retval; 438 int retval;
471 439
472 zfcp_port_get(port); 440 get_device(&port->dev);
473 port->rport_task = RPORT_DEL; 441 port->rport_task = RPORT_DEL;
474 zfcp_scsi_rport_work(&port->rport_work); 442 zfcp_scsi_rport_work(&port->rport_work);
475 443
@@ -488,7 +456,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
488 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); 456 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
489 457
490out: 458out:
491 zfcp_port_put(port); 459 put_device(&port->dev);
492} 460}
493 461
494/** 462/**
@@ -501,12 +469,12 @@ out:
501 */ 469 */
502void zfcp_fc_test_link(struct zfcp_port *port) 470void zfcp_fc_test_link(struct zfcp_port *port)
503{ 471{
504 zfcp_port_get(port); 472 get_device(&port->dev);
505 if (!queue_work(port->adapter->work_queue, &port->test_link_work)) 473 if (!queue_work(port->adapter->work_queue, &port->test_link_work))
506 zfcp_port_put(port); 474 put_device(&port->dev);
507} 475}
508 476
509static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) 477static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
510{ 478{
511 struct scatterlist *sg = &gpn_ft->sg_req; 479 struct scatterlist *sg = &gpn_ft->sg_req;
512 480
@@ -516,10 +484,10 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
516 kfree(gpn_ft); 484 kfree(gpn_ft);
517} 485}
518 486
519static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num) 487static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
520{ 488{
521 struct zfcp_gpn_ft *gpn_ft; 489 struct zfcp_fc_gpn_ft *gpn_ft;
522 struct ct_iu_gpn_ft_req *req; 490 struct zfcp_fc_gpn_ft_req *req;
523 491
524 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL); 492 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
525 if (!gpn_ft) 493 if (!gpn_ft)
@@ -542,159 +510,152 @@ out:
542} 510}
543 511
544 512
545static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, 513static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
546 struct zfcp_adapter *adapter, int max_bytes) 514 struct zfcp_adapter *adapter, int max_bytes)
547{ 515{
548 struct zfcp_send_ct *ct = &gpn_ft->ct; 516 struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
549 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 517 struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
550 struct zfcp_fc_ns_handler_data compl_rec; 518 DECLARE_COMPLETION_ONSTACK(completion);
551 int ret; 519 int ret;
552 520
553 /* prepare CT IU for GPN_FT */ 521 /* prepare CT IU for GPN_FT */
554 req->header.revision = ZFCP_CT_REVISION; 522 req->ct_hdr.ct_rev = FC_CT_REV;
555 req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; 523 req->ct_hdr.ct_fs_type = FC_FST_DIR;
556 req->header.gs_subtype = ZFCP_CT_NAME_SERVER; 524 req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
557 req->header.options = ZFCP_CT_SYNCHRONOUS; 525 req->ct_hdr.ct_options = 0;
558 req->header.cmd_rsp_code = ZFCP_CT_GPN_FT; 526 req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
559 req->header.max_res_size = max_bytes / 4; 527 req->ct_hdr.ct_mr_size = max_bytes / 4;
560 req->flags = 0; 528 req->gpn_ft.fn_domain_id_scope = 0;
561 req->domain_id_scope = 0; 529 req->gpn_ft.fn_area_id_scope = 0;
562 req->area_id_scope = 0; 530 req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
563 req->fc4_type = ZFCP_CT_SCSI_FCP;
564 531
565 /* prepare zfcp_send_ct */ 532 /* prepare zfcp_send_ct */
566 ct->wka_port = &adapter->gs->ds; 533 ct->handler = zfcp_fc_complete;
567 ct->handler = zfcp_fc_ns_handler; 534 ct->handler_data = &completion;
568 ct->handler_data = (unsigned long)&compl_rec;
569 ct->req = &gpn_ft->sg_req; 535 ct->req = &gpn_ft->sg_req;
570 ct->resp = gpn_ft->sg_resp; 536 ct->resp = gpn_ft->sg_resp;
571 537
572 init_completion(&compl_rec.done); 538 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
573 compl_rec.handler = NULL; 539 ZFCP_FC_CTELS_TMO);
574 ret = zfcp_fsf_send_ct(ct, NULL);
575 if (!ret) 540 if (!ret)
576 wait_for_completion(&compl_rec.done); 541 wait_for_completion(&completion);
577 return ret; 542 return ret;
578} 543}
579 544
580static void zfcp_fc_validate_port(struct zfcp_port *port) 545static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
581{ 546{
582 struct zfcp_adapter *adapter = port->adapter;
583
584 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)) 547 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
585 return; 548 return;
586 549
587 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); 550 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
588 551
589 if ((port->supported_classes != 0) || 552 if ((port->supported_classes != 0) ||
590 !list_empty(&port->unit_list_head)) { 553 !list_empty(&port->unit_list))
591 zfcp_port_put(port);
592 return; 554 return;
593 } 555
594 zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL); 556 list_move_tail(&port->list, lh);
595 zfcp_erp_wait(adapter);
596 zfcp_port_put(port);
597 zfcp_port_dequeue(port);
598} 557}
599 558
600static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) 559static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
560 struct zfcp_adapter *adapter, int max_entries)
601{ 561{
602 struct zfcp_send_ct *ct = &gpn_ft->ct; 562 struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
603 struct scatterlist *sg = gpn_ft->sg_resp; 563 struct scatterlist *sg = gpn_ft->sg_resp;
604 struct ct_hdr *hdr = sg_virt(sg); 564 struct fc_ct_hdr *hdr = sg_virt(sg);
605 struct gpn_ft_resp_acc *acc = sg_virt(sg); 565 struct fc_gpn_ft_resp *acc = sg_virt(sg);
606 struct zfcp_adapter *adapter = ct->wka_port->adapter;
607 struct zfcp_port *port, *tmp; 566 struct zfcp_port *port, *tmp;
567 unsigned long flags;
568 LIST_HEAD(remove_lh);
608 u32 d_id; 569 u32 d_id;
609 int ret = 0, x, last = 0; 570 int ret = 0, x, last = 0;
610 571
611 if (ct->status) 572 if (ct->status)
612 return -EIO; 573 return -EIO;
613 574
614 if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) { 575 if (hdr->ct_cmd != FC_FS_ACC) {
615 if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD) 576 if (hdr->ct_reason == FC_BA_RJT_UNABLE)
616 return -EAGAIN; /* might be a temporary condition */ 577 return -EAGAIN; /* might be a temporary condition */
617 return -EIO; 578 return -EIO;
618 } 579 }
619 580
620 if (hdr->max_res_size) { 581 if (hdr->ct_mr_size) {
621 dev_warn(&adapter->ccw_device->dev, 582 dev_warn(&adapter->ccw_device->dev,
622 "The name server reported %d words residual data\n", 583 "The name server reported %d words residual data\n",
623 hdr->max_res_size); 584 hdr->ct_mr_size);
624 return -E2BIG; 585 return -E2BIG;
625 } 586 }
626 587
627 mutex_lock(&zfcp_data.config_mutex);
628
629 /* first entry is the header */ 588 /* first entry is the header */
630 for (x = 1; x < max_entries && !last; x++) { 589 for (x = 1; x < max_entries && !last; x++) {
631 if (x % (ZFCP_GPN_FT_ENTRIES + 1)) 590 if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
632 acc++; 591 acc++;
633 else 592 else
634 acc = sg_virt(++sg); 593 acc = sg_virt(++sg);
635 594
636 last = acc->control & 0x80; 595 last = acc->fp_flags & FC_NS_FID_LAST;
637 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 | 596 d_id = ntoh24(acc->fp_fid);
638 acc->port_id[2];
639 597
640 /* don't attach ports with a well known address */ 598 /* don't attach ports with a well known address */
641 if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA) 599 if (d_id >= FC_FID_WELL_KNOWN_BASE)
642 continue; 600 continue;
643 /* skip the adapter's port and known remote ports */ 601 /* skip the adapter's port and known remote ports */
644 if (acc->wwpn == fc_host_port_name(adapter->scsi_host)) 602 if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
645 continue;
646 port = zfcp_get_port_by_wwpn(adapter, acc->wwpn);
647 if (port)
648 continue; 603 continue;
649 604
650 port = zfcp_port_enqueue(adapter, acc->wwpn, 605 port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
651 ZFCP_STATUS_COMMON_NOESC, d_id); 606 ZFCP_STATUS_COMMON_NOESC, d_id);
652 if (IS_ERR(port)) 607 if (!IS_ERR(port))
653 ret = PTR_ERR(port);
654 else
655 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); 608 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
609 else if (PTR_ERR(port) != -EEXIST)
610 ret = PTR_ERR(port);
656 } 611 }
657 612
658 zfcp_erp_wait(adapter); 613 zfcp_erp_wait(adapter);
659 list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) 614 write_lock_irqsave(&adapter->port_list_lock, flags);
660 zfcp_fc_validate_port(port); 615 list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
661 mutex_unlock(&zfcp_data.config_mutex); 616 zfcp_fc_validate_port(port, &remove_lh);
617 write_unlock_irqrestore(&adapter->port_list_lock, flags);
618
619 list_for_each_entry_safe(port, tmp, &remove_lh, list) {
620 zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
621 zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
622 }
623
662 return ret; 624 return ret;
663} 625}
664 626
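Several conversions in this file (ntoh24(page->rscn_fid), ntoh24(acc->fp_fid), hton24(...)) use libfc's 24-bit helpers, because FC port IDs are 3-byte big-endian fields with no native C type. Standalone equivalents look roughly like this (a sketch; the kernel's versions live in the libfc headers):

#include <stdint.h>
#include <stdio.h>

/* 3-byte big-endian FC_ID to host u32 and back, cf. libfc's ntoh24/hton24 */
static uint32_t ntoh24_sketch(const uint8_t *p)
{
	return (uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 | p[2];
}

static void hton24_sketch(uint8_t *p, uint32_t v)
{
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}

int main(void)
{
	uint8_t fid[3];

	hton24_sketch(fid, 0xfffffc);		/* directory service WKA */
	printf("0x%06x\n", ntoh24_sketch(fid));	/* prints 0xfffffc */
	return 0;
}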
665/** 627/**
666 * zfcp_fc_scan_ports - scan remote ports and attach new ports 628 * zfcp_fc_scan_ports - scan remote ports and attach new ports
667 * @adapter: pointer to struct zfcp_adapter 629 * @work: reference to scheduled work
668 */ 630 */
669int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) 631void zfcp_fc_scan_ports(struct work_struct *work)
670{ 632{
633 struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
634 scan_work);
671 int ret, i; 635 int ret, i;
672 struct zfcp_gpn_ft *gpn_ft; 636 struct zfcp_fc_gpn_ft *gpn_ft;
673 int chain, max_entries, buf_num, max_bytes; 637 int chain, max_entries, buf_num, max_bytes;
674 638
675 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; 639 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
676 buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1; 640 buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
677 max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES; 641 max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
678 max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE; 642 max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
679 643
680 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT && 644 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
681 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) 645 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
682 return 0; 646 return;
683 647
684 ret = zfcp_fc_wka_port_get(&adapter->gs->ds); 648 if (zfcp_fc_wka_port_get(&adapter->gs->ds))
685 if (ret) 649 return;
686 return ret;
687 650
688 gpn_ft = zfcp_alloc_sg_env(buf_num); 651 gpn_ft = zfcp_alloc_sg_env(buf_num);
689 if (!gpn_ft) { 652 if (!gpn_ft)
690 ret = -ENOMEM;
691 goto out; 653 goto out;
692 }
693 654
694 for (i = 0; i < 3; i++) { 655 for (i = 0; i < 3; i++) {
695 ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); 656 ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
696 if (!ret) { 657 if (!ret) {
697 ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries); 658 ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
698 if (ret == -EAGAIN) 659 if (ret == -EAGAIN)
699 ssleep(1); 660 ssleep(1);
700 else 661 else
@@ -704,174 +665,142 @@ int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
704 zfcp_free_sg_env(gpn_ft, buf_num); 665 zfcp_free_sg_env(gpn_ft, buf_num);
705out: 666out:
706 zfcp_fc_wka_port_put(&adapter->gs->ds); 667 zfcp_fc_wka_port_put(&adapter->gs->ds);
707 return ret;
708} 668}
709 669
710 670static void zfcp_fc_ct_els_job_handler(void *data)
711void _zfcp_fc_scan_ports_later(struct work_struct *work)
712{ 671{
713 zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); 672 struct fc_bsg_job *job = data;
714} 673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
674 struct fc_bsg_reply *jr = job->reply;
715 675
716struct zfcp_els_fc_job { 676 jr->reply_payload_rcv_len = job->reply_payload.payload_len;
717 struct zfcp_send_els els; 677 jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
718 struct fc_bsg_job *job; 678 jr->result = zfcp_ct_els->status ? -EIO : 0;
719}; 679 job->job_done(job);
680}
720 681
721static void zfcp_fc_generic_els_handler(unsigned long data) 682static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
722{ 683{
723 struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data; 684 u32 preamble_word1;
724 struct fc_bsg_job *job = els_fc_job->job; 685 u8 gs_type;
725 struct fc_bsg_reply *reply = job->reply; 686 struct zfcp_adapter *adapter;
726 687
727 if (els_fc_job->els.status) { 688 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
728 /* request rejected or timed out */ 689 gs_type = (preamble_word1 & 0xff000000) >> 24;
729 reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT;
730 goto out;
731 }
732 690
733 reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 691 adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
734 reply->reply_payload_rcv_len = job->reply_payload.payload_len;
735 692
736out: 693 switch (gs_type) {
737 job->state_flags = FC_RQST_STATE_DONE; 694 case FC_FST_ALIAS:
738 job->job_done(job); 695 return &adapter->gs->as;
739 kfree(els_fc_job); 696 case FC_FST_MGMT:
697 return &adapter->gs->ms;
698 case FC_FST_TIME:
699 return &adapter->gs->ts;
700 break;
701 case FC_FST_DIR:
702 return &adapter->gs->ds;
703 break;
704 default:
705 return NULL;
706 }
740} 707}
741 708
742int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) 709static void zfcp_fc_ct_job_handler(void *data)
743{ 710{
744 struct zfcp_els_fc_job *els_fc_job; 711 struct fc_bsg_job *job = data;
745 struct fc_rport *rport = job->rport; 712 struct zfcp_fc_wka_port *wka_port;
746 struct Scsi_Host *shost;
747 struct zfcp_adapter *adapter;
748 struct zfcp_port *port;
749 u8 *port_did;
750 713
751 shost = rport ? rport_to_shost(rport) : job->shost; 714 wka_port = zfcp_fc_job_wka_port(job);
752 adapter = (struct zfcp_adapter *)shost->hostdata[0]; 715 zfcp_fc_wka_port_put(wka_port);
753 716
754 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) 717 zfcp_fc_ct_els_job_handler(data);
755 return -EINVAL; 718}
756 719
757 els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL); 720static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
758 if (!els_fc_job) 721 struct zfcp_adapter *adapter)
759 return -ENOMEM; 722{
723 struct zfcp_fsf_ct_els *els = job->dd_data;
724 struct fc_rport *rport = job->rport;
725 struct zfcp_port *port;
726 u32 d_id;
760 727
761 els_fc_job->els.adapter = adapter;
762 if (rport) { 728 if (rport) {
763 read_lock_irq(&zfcp_data.config_lock);
764 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 729 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
765 if (port) 730 if (!port)
766 els_fc_job->els.d_id = port->d_id;
767 read_unlock_irq(&zfcp_data.config_lock);
768 if (!port) {
769 kfree(els_fc_job);
770 return -EINVAL; 731 return -EINVAL;
771 }
772 } else {
773 port_did = job->request->rqst_data.h_els.port_id;
774 els_fc_job->els.d_id = (port_did[0] << 16) +
775 (port_did[1] << 8) + port_did[2];
776 }
777 732
778 els_fc_job->els.req = job->request_payload.sg_list; 733 d_id = port->d_id;
779 els_fc_job->els.resp = job->reply_payload.sg_list; 734 put_device(&port->dev);
780 els_fc_job->els.handler = zfcp_fc_generic_els_handler; 735 } else
781 els_fc_job->els.handler_data = (unsigned long) els_fc_job; 736 d_id = ntoh24(job->request->rqst_data.h_els.port_id);
782 els_fc_job->job = job;
783 737
784 return zfcp_fsf_send_els(&els_fc_job->els); 738 els->handler = zfcp_fc_ct_els_job_handler;
739 return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
785} 740}
786 741
787struct zfcp_ct_fc_job { 742static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
788 struct zfcp_send_ct ct; 743 struct zfcp_adapter *adapter)
789 struct fc_bsg_job *job;
790};
791
792static void zfcp_fc_generic_ct_handler(unsigned long data)
793{ 744{
794 struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data; 745 int ret;
795 struct fc_bsg_job *job = ct_fc_job->job; 746 struct zfcp_fsf_ct_els *ct = job->dd_data;
747 struct zfcp_fc_wka_port *wka_port;
796 748
797 job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ? 749 wka_port = zfcp_fc_job_wka_port(job);
798 FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; 750 if (!wka_port)
799 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; 751 return -EINVAL;
800 job->state_flags = FC_RQST_STATE_DONE;
801 job->job_done(job);
802 752
803 zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); 753 ret = zfcp_fc_wka_port_get(wka_port);
754 if (ret)
755 return ret;
804 756
805 kfree(ct_fc_job); 757 ct->handler = zfcp_fc_ct_job_handler;
758 ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
759 if (ret)
760 zfcp_fc_wka_port_put(wka_port);
761
762 return ret;
806} 763}
807 764
808int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) 765int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
809{ 766{
810 int ret;
811 u8 gs_type;
812 struct fc_rport *rport = job->rport;
813 struct Scsi_Host *shost; 767 struct Scsi_Host *shost;
814 struct zfcp_adapter *adapter; 768 struct zfcp_adapter *adapter;
815 struct zfcp_ct_fc_job *ct_fc_job; 769 struct zfcp_fsf_ct_els *ct_els = job->dd_data;
816 u32 preamble_word1;
817
818 shost = rport ? rport_to_shost(rport) : job->shost;
819 770
771 shost = job->rport ? rport_to_shost(job->rport) : job->shost;
820 adapter = (struct zfcp_adapter *)shost->hostdata[0]; 772 adapter = (struct zfcp_adapter *)shost->hostdata[0];
773
821 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) 774 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
822 return -EINVAL; 775 return -EINVAL;
823 776
824 ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL); 777 ct_els->req = job->request_payload.sg_list;
825 if (!ct_fc_job) 778 ct_els->resp = job->reply_payload.sg_list;
826 return -ENOMEM; 779 ct_els->handler_data = job;
827 780
828 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; 781 switch (job->request->msgcode) {
829 gs_type = (preamble_word1 & 0xff000000) >> 24; 782 case FC_BSG_RPT_ELS:
830 783 case FC_BSG_HST_ELS_NOLOGIN:
831 switch (gs_type) { 784 return zfcp_fc_exec_els_job(job, adapter);
832 case FC_FST_ALIAS: 785 case FC_BSG_RPT_CT:
833 ct_fc_job->ct.wka_port = &adapter->gs->as; 786 case FC_BSG_HST_CT:
834 break; 787 return zfcp_fc_exec_ct_job(job, adapter);
835 case FC_FST_MGMT:
836 ct_fc_job->ct.wka_port = &adapter->gs->ms;
837 break;
838 case FC_FST_TIME:
839 ct_fc_job->ct.wka_port = &adapter->gs->ts;
840 break;
841 case FC_FST_DIR:
842 ct_fc_job->ct.wka_port = &adapter->gs->ds;
843 break;
844 default: 788 default:
845 kfree(ct_fc_job); 789 return -EINVAL;
846 return -EINVAL; /* no such service */
847 }
848
849 ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port);
850 if (ret) {
851 kfree(ct_fc_job);
852 return ret;
853 } 790 }
791}
854 792
855 ct_fc_job->ct.req = job->request_payload.sg_list; 793int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
856 ct_fc_job->ct.resp = job->reply_payload.sg_list; 794{
857 ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; 795 /* hardware tracks timeout, reset bsg timeout to not interfere */
858 ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; 796 return -EAGAIN;
859 ct_fc_job->ct.completion = NULL;
860 ct_fc_job->job = job;
861
862 ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL);
863 if (ret) {
864 kfree(ct_fc_job);
865 zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
866 }
867 return ret;
868} 797}
869 798
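For CT jobs, the WKA port is chosen from the GS type carried in the topmost byte of the CT preamble's second word, as zfcp_fc_job_wka_port() above does with (preamble_word1 & 0xff000000) >> 24. A two-line sketch of that extraction (0xfc020000 is a hypothetical preamble: directory service, name-server subtype):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t preamble_word1 = 0xfc020000;	/* hypothetical CT preamble */
	uint8_t gs_type = (preamble_word1 & 0xff000000) >> 24;

	printf("gs_type 0x%02x\n", gs_type);	/* 0xfc, i.e. FC_FST_DIR */
	return 0;
}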
870int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) 799int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
871{ 800{
872 struct zfcp_wka_ports *wka_ports; 801 struct zfcp_fc_wka_ports *wka_ports;
873 802
874 wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); 803 wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
875 if (!wka_ports) 804 if (!wka_ports)
876 return -ENOMEM; 805 return -ENOMEM;
877 806
@@ -880,7 +809,6 @@ int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
880 zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter); 809 zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
881 zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter); 810 zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
882 zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter); 811 zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
883 zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter);
884 812
885 return 0; 813 return 0;
886} 814}
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
new file mode 100644
index 000000000000..0747b087390d
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -0,0 +1,262 @@
1/*
2 * zfcp device driver
3 *
4 * Fibre Channel related definitions and inline functions for the zfcp
5 * device driver
6 *
7 * Copyright IBM Corporation 2009
8 */
9
10#ifndef ZFCP_FC_H
11#define ZFCP_FC_H
12
13#include <scsi/fc/fc_els.h>
14#include <scsi/fc/fc_fcp.h>
15#include <scsi/fc/fc_ns.h>
16#include <scsi/scsi_cmnd.h>
17#include <scsi/scsi_tcq.h>
18#include "zfcp_fsf.h"
19
20#define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr))
21#define ZFCP_FC_GPN_FT_ENT_PAGE (ZFCP_FC_CT_SIZE_PAGE \
22 / sizeof(struct fc_gpn_ft_resp))
23#define ZFCP_FC_GPN_FT_NUM_BUFS 4 /* memory pages */
24
25#define ZFCP_FC_GPN_FT_MAX_SIZE (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
26 - sizeof(struct fc_ct_hdr))
27#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
29
30#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
31
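Two bits of arithmetic in these macros are worth spelling out. Assuming 4 KiB pages and 16-byte fc_ct_hdr/fc_gpn_ft_resp structures (check the libfc headers; these sizes are assumptions here), one page holds 255 GPN_FT entries, so a chained four-buffer request can return up to 4 * 256 = 1024 entries; and with libfc's default R_A_TOV of 10 000 ms, ZFCP_FC_CTELS_TMO works out to 20 seconds. A checkable sketch:

#include <stdio.h>

#define PAGE_SZ    4096		/* assumed PAGE_SIZE */
#define CT_HDR     16		/* assumed sizeof(struct fc_ct_hdr) */
#define GPN_ENT    16		/* assumed sizeof(struct fc_gpn_ft_resp) */
#define R_A_TOV_MS 10000	/* assumed FC_DEF_R_A_TOV */

int main(void)
{
	int ent_page = (PAGE_SZ - CT_HDR) / GPN_ENT; /* ZFCP_FC_GPN_FT_ENT_PAGE */
	int max_ent = 4 * (ent_page + 1);	     /* ZFCP_FC_GPN_FT_MAX_ENT */

	printf("%d entries/page, %d max, %d s timeout\n",
	       ent_page, max_ent, 2 * R_A_TOV_MS / 1000); /* 255, 1024, 20 */
	return 0;
}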
32/**
33 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
34 * @ct_hdr: FC GS common transport header
35 * @gid_pn: GID_PN request
36 */
37struct zfcp_fc_gid_pn_req {
38 struct fc_ct_hdr ct_hdr;
39 struct fc_ns_gid_pn gid_pn;
40} __packed;
41
42/**
43 * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
44 * @ct_hdr: FC GS common transport header
45 * @gid_pn: GID_PN response
46 */
47struct zfcp_fc_gid_pn_resp {
48 struct fc_ct_hdr ct_hdr;
49 struct fc_gid_pn_resp gid_pn;
50} __packed;
51
52/**
53 * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
54 * @ct: data passed to zfcp_fsf for issuing fsf request
55 * @sg_req: scatterlist entry for request data
56 * @sg_resp: scatterlist entry for response data
57 * @gid_pn_req: GID_PN request data
58 * @gid_pn_resp: GID_PN response data
59 */
60struct zfcp_fc_gid_pn {
61 struct zfcp_fsf_ct_els ct;
62 struct scatterlist sg_req;
63 struct scatterlist sg_resp;
64 struct zfcp_fc_gid_pn_req gid_pn_req;
65 struct zfcp_fc_gid_pn_resp gid_pn_resp;
66 struct zfcp_port *port;
67};
68
69/**
70 * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
71 * @ct_hdr: FC GS common transport header
72 * @gpn_ft: GPN_FT request
73 */
74struct zfcp_fc_gpn_ft_req {
75 struct fc_ct_hdr ct_hdr;
76 struct fc_ns_gid_ft gpn_ft;
77} __packed;
78
79/**
80 * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
81 * @ct_hdr: FC GS common transport header
82 * @gpn_ft: Array of gpn_ft response data to fill one memory page
83 */
84struct zfcp_fc_gpn_ft_resp {
85 struct fc_ct_hdr ct_hdr;
86 struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
87} __packed;
88
89/**
90 * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
91 * @ct: data passed to zfcp_fsf for issuing fsf request
92 * @sg_req: scatter list entry for gpn_ft request
93 * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
94 */
95struct zfcp_fc_gpn_ft {
96 struct zfcp_fsf_ct_els ct;
97 struct scatterlist sg_req;
98 struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
99};
100
101/**
102 * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
103 * @els: data required for issuing els fsf command
104 * @req: scatterlist entry for ELS ADISC request
105 * @resp: scatterlist entry for ELS ADISC response
106 * @adisc_req: ELS ADISC request data
107 * @adisc_resp: ELS ADISC response data
108 */
109struct zfcp_fc_els_adisc {
110 struct zfcp_fsf_ct_els els;
111 struct scatterlist req;
112 struct scatterlist resp;
113 struct fc_els_adisc adisc_req;
114 struct fc_els_adisc adisc_resp;
115};
116
117/**
118 * enum zfcp_fc_wka_status - FC WKA port status in zfcp
119 * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
120 * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
121 * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
122 * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
123 */
124enum zfcp_fc_wka_status {
125 ZFCP_FC_WKA_PORT_OFFLINE,
126 ZFCP_FC_WKA_PORT_CLOSING,
127 ZFCP_FC_WKA_PORT_OPENING,
128 ZFCP_FC_WKA_PORT_ONLINE,
129};
130
131/**
132 * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
133 * @adapter: Pointer to adapter structure this WKA port belongs to
134 * @completion_wq: Wait for completion of open/close command
135 * @status: Current status of WKA port
136 * @refcount: Reference count to keep port open as long as it is in use
137 * @d_id: FC destination id or well-known-address
138 * @handle: FSF handle for the open WKA port
139 * @mutex: Mutex used during opening/closing state changes
140 * @work: For delaying the closing of the WKA port
141 */
142struct zfcp_fc_wka_port {
143 struct zfcp_adapter *adapter;
144 wait_queue_head_t completion_wq;
145 enum zfcp_fc_wka_status status;
146 atomic_t refcount;
147 u32 d_id;
148 u32 handle;
149 struct mutex mutex;
150 struct delayed_work work;
151};
152
153/**
154 * struct zfcp_fc_wka_ports - Data structures for FC generic services
155 * @ms: FC Management service
156 * @ts: FC time service
157 * @ds: FC directory service
158 * @as: FC alias service
159 */
160struct zfcp_fc_wka_ports {
161 struct zfcp_fc_wka_port ms;
162 struct zfcp_fc_wka_port ts;
163 struct zfcp_fc_wka_port ds;
164 struct zfcp_fc_wka_port as;
165};
166
167/**
168 * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
169 * @fcp: fcp_cmnd to setup
170 * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
171 */
172static inline
173void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
174{
175 char tag[2];
176
177 int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
178
179 if (scsi_populate_tag_msg(scsi, tag)) {
180 switch (tag[0]) {
181 case MSG_ORDERED_TAG:
182 fcp->fc_pri_ta |= FCP_PTA_ORDERED;
183 break;
184 case MSG_SIMPLE_TAG:
185 fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
186 break;
187 }
188 } else
189 fcp->fc_pri_ta = FCP_PTA_SIMPLE;
190
191 if (scsi->sc_data_direction == DMA_FROM_DEVICE)
192 fcp->fc_flags |= FCP_CFL_RDDATA;
193 if (scsi->sc_data_direction == DMA_TO_DEVICE)
194 fcp->fc_flags |= FCP_CFL_WRDATA;
195
196 memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
197
198 fcp->fc_dl = scsi_bufflen(scsi);
199}
200
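The inline above gathers everything the FCP_CMND IU needs straight from the scsi_cmnd: LUN, task attribute, transfer-direction flags, CDB and data length. A cut-down userspace model of the same packing (the struct layout and flag values are illustrative, not the on-the-wire struct fcp_cmnd):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fcp_sketch {		/* illustrative subset of struct fcp_cmnd */
	uint8_t flags;		/* stands in for FCP_CFL_RDDATA/WRDATA */
	uint8_t cdb[16];
	uint32_t data_len;	/* matches fcp->fc_dl = scsi_bufflen(scsi) */
};

enum { RD = 1, WR = 2 };	/* hypothetical direction flag values */

static void build_fcp(struct fcp_sketch *f, const uint8_t *cdb, size_t cdb_len,
		      uint32_t buflen, int from_device)
{
	f->flags |= from_device ? RD : WR;
	memcpy(f->cdb, cdb, cdb_len);
	f->data_len = buflen;
}

int main(void)
{
	const uint8_t read10[10] = { 0x28 };	/* READ(10) opcode */
	struct fcp_sketch f = { 0 };

	build_fcp(&f, read10, sizeof(read10), 4096, 1);
	printf("flags %u, dl %u\n", f.flags, f.data_len);	/* 1, 4096 */
	return 0;
}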
201/**
202 * zfcp_fc_fcp_tm - setup FCP command as task management command
203 * @fcp: fcp_cmnd to setup
204 * @dev: scsi_device where to send the task management command
205 * @tm_flags: task management flags to setup tm command
206 */
207static inline
208void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
209{
210 int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
211 fcp->fc_tm_flags |= tm_flags;
212}
213
214/**
215 * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
216 * @fcp_rsp: FCP RSP IU to evaluate
217 * @scsi: SCSI command where to update status and sense buffer
218 */
219static inline
220void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
221 struct scsi_cmnd *scsi)
222{
223 struct fcp_resp_rsp_info *rsp_info;
224 char *sense;
225 u32 sense_len, resid;
226 u8 rsp_flags;
227
228 set_msg_byte(scsi, COMMAND_COMPLETE);
229 scsi->result |= fcp_rsp->resp.fr_status;
230
231 rsp_flags = fcp_rsp->resp.fr_flags;
232
233 if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
234 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
235 if (rsp_info->rsp_code == FCP_TMF_CMPL)
236 set_host_byte(scsi, DID_OK);
237 else {
238 set_host_byte(scsi, DID_ERROR);
239 return;
240 }
241 }
242
243 if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
244 sense = (char *) &fcp_rsp[1];
245 if (rsp_flags & FCP_RSP_LEN_VAL)
246 sense += fcp_rsp->ext.fr_sns_len;
247 sense_len = min(fcp_rsp->ext.fr_sns_len,
248 (u32) SCSI_SENSE_BUFFERSIZE);
249 memcpy(scsi->sense_buffer, sense, sense_len);
250 }
251
252 if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
253 resid = fcp_rsp->ext.fr_resid;
254 scsi_set_resid(scsi, resid);
255 if (scsi_bufflen(scsi) - resid < scsi->underflow &&
256 !(rsp_flags & FCP_SNS_LEN_VAL) &&
257 fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
258 set_host_byte(scsi, DID_ERROR);
259 }
260}
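
/*
 * Illustrative completion-path sketch: applying the helper above to the
 * FCP RSP IU returned in a request.  "qtcb_fcp_rsp" stands in for the
 * response area of the QTCB; the real caller is the FCP command handler
 * in zfcp_fsf.c.
 */
static inline void zfcp_fc_example_eval_rsp(void *qtcb_fcp_rsp,
					    struct scsi_cmnd *scsi)
{
	struct fcp_resp_with_ext *fcp_rsp = qtcb_fcp_rsp;

	/* copies SCSI status, residual count and sense data to scsi */
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scsi);
	scsi->scsi_done(scsi);
}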

#endif
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4e41baa0c141..b3b1d2f79398 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,15 +3,20 @@
  *
  * Implementation of FSF commands.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/blktrace_api.h>
+#include <linux/slab.h>
+#include <scsi/fc/fc_els.h>
 #include "zfcp_ext.h"
+#include "zfcp_fc.h"
 #include "zfcp_dbf.h"
+#include "zfcp_qdio.h"
+#include "zfcp_reqlist.h"
 
 static void zfcp_fsf_request_timeout_handler(unsigned long data)
 {
@@ -122,36 +127,32 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
 
 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
 {
+	unsigned long flags;
 	struct fsf_status_read_buffer *sr_buf = req->data;
 	struct zfcp_adapter *adapter = req->adapter;
 	struct zfcp_port *port;
-	int d_id = sr_buf->d_id & ZFCP_DID_MASK;
-	unsigned long flags;
+	int d_id = ntoh24(sr_buf->d_id);
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	list_for_each_entry(port, &adapter->port_list_head, list)
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->d_id == d_id) {
-			read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 			zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
-			return;
+			break;
 		}
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
 
 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
 					 struct fsf_link_down_info *link_down)
 {
 	struct zfcp_adapter *adapter = req->adapter;
-	unsigned long flags;
 
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
 		return;
 
 	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	zfcp_scsi_schedule_rports_block(adapter);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 
 	if (!link_down)
 		goto out;
@@ -291,7 +292,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 			zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
 							req);
 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
-			schedule_work(&adapter->scan_work);
+			queue_work(adapter->work_queue, &adapter->scan_work);
 		break;
 	case FSF_STATUS_READ_CFDC_UPDATED:
 		zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
@@ -317,7 +318,6 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 		return;
 	case FSF_SQ_COMMAND_ABORTED:
-		req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
 		break;
 	case FSF_SQ_NO_RECOM:
 		dev_err(&req->adapter->ccw_device->dev,
@@ -358,8 +358,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 	zfcp_dbf_hba_fsf_response(req);
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		return;
 	}
 
@@ -377,7 +376,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 	case FSF_PROT_ERROR_STATE:
 	case FSF_PROT_SEQ_NUMB_ERROR:
 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
-		req->status |= ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PROT_UNSUPP_QTCB_TYPE:
 		dev_err(&adapter->ccw_device->dev,
@@ -397,7 +396,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 	case FSF_PROT_LINK_DOWN:
 		zfcp_fsf_link_down_info_eval(req, "fspse_5",
 					     &psq->link_down_info);
-		/* FIXME: reopening adapter now? better wait for link up */
+		/* go through reopen to flush pending requests */
 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
 		break;
 	case FSF_PROT_REEST_QUEUE:
@@ -461,15 +460,10 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
 {
 	struct zfcp_fsf_req *req, *tmp;
-	unsigned long flags;
 	LIST_HEAD(remove_queue);
-	unsigned int i;
 
 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
-	spin_lock_irqsave(&adapter->req_list_lock, flags);
-	for (i = 0; i < REQUEST_LIST_SIZE; i++)
-		list_splice_init(&adapter->req_list[i], &remove_queue);
-	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+	zfcp_reqlist_move(adapter->req_list, &remove_queue);
 
 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
 		list_del(&req->list);
@@ -480,18 +474,23 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
 
 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 {
-	struct fsf_qtcb_bottom_config *bottom;
+	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
 	struct zfcp_adapter *adapter = req->adapter;
 	struct Scsi_Host *shost = adapter->scsi_host;
+	struct fc_els_flogi *nsp, *plogi;
 
-	bottom = &req->qtcb->bottom.config;
+	/* adjust pointers for missing command code */
+	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+					- sizeof(u32));
+	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+					- sizeof(u32));
 
 	if (req->data)
 		memcpy(req->data, bottom, sizeof(*bottom));
 
-	fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
-	fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
-	fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
+	fc_host_port_name(shost) = nsp->fl_wwpn;
+	fc_host_node_name(shost) = nsp->fl_wwnn;
+	fc_host_port_id(shost) = ntoh24(bottom->s_id);
 	fc_host_speed(shost) = bottom->fc_link_speed;
 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
 
@@ -503,9 +502,9 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 
 	switch (bottom->fc_topology) {
 	case FSF_TOPO_P2P:
-		adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
-		adapter->peer_wwpn = bottom->plogi_payload.wwpn;
-		adapter->peer_wwnn = bottom->plogi_payload.wwnn;
+		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+		adapter->peer_wwpn = plogi->fl_wwpn;
+		adapter->peer_wwnn = plogi->fl_wwnn;
 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
 		break;
 	case FSF_TOPO_FABRIC:
@@ -616,6 +615,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
 	fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
 	fc_host_supported_speeds(shost) = bottom->supported_speed;
+	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+	       FC_FC4_LIST_SIZE);
+	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+	       FC_FC4_LIST_SIZE);
 }
 
 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -722,12 +725,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 	req->adapter = adapter;
 	req->fsf_command = fsf_cmd;
 	req->req_id = adapter->req_no;
-	req->queue_req.sbal_number = 1;
-	req->queue_req.sbal_first = req_q->first;
-	req->queue_req.sbal_last = req_q->first;
-	req->queue_req.sbale_curr = 1;
+	req->qdio_req.sbal_number = 1;
+	req->qdio_req.sbal_first = req_q->first;
+	req->qdio_req.sbal_last = req_q->first;
+	req->qdio_req.sbale_curr = 1;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].addr = (void *) req->req_id;
 	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
 
@@ -742,6 +745,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 			return ERR_PTR(-ENOMEM);
 		}
 
+		req->seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_id = req->req_id;
 		req->qtcb->prefix.ulp_info = 26;
@@ -749,8 +753,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
 		req->qtcb->header.req_handle = req->req_id;
 		req->qtcb->header.fsf_command = req->fsf_command;
-		req->seq_no = adapter->fsf_req_seq_no;
-		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
 		sbale[1].addr = (void *) req->qtcb;
 		sbale[1].length = sizeof(struct fsf_qtcb);
 	}
@@ -767,25 +769,17 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	struct zfcp_qdio *qdio = adapter->qdio;
-	unsigned long flags;
-	int idx;
-	int with_qtcb = (req->qtcb != NULL);
+	int with_qtcb = (req->qtcb != NULL);
+	int req_id = req->req_id;
 
-	/* put allocated FSF request into hash table */
-	spin_lock_irqsave(&adapter->req_list_lock, flags);
-	idx = zfcp_reqlist_hash(req->req_id);
-	list_add_tail(&req->list, &adapter->req_list[idx]);
-	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+	zfcp_reqlist_add(adapter->req_list, req);
 
-	req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
+	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
 	req->issued = get_clock();
-	if (zfcp_qdio_send(qdio, &req->queue_req)) {
+	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
 		del_timer(&req->timer);
-		spin_lock_irqsave(&adapter->req_list_lock, flags);
 		/* lookup request again, list might have changed */
-		if (zfcp_reqlist_find_safe(adapter, req))
-			zfcp_reqlist_remove(adapter, req);
-		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+		zfcp_reqlist_find_rm(adapter->req_list, req_id);
 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
 		return -EIO;
 	}
@@ -823,9 +817,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
-	req->queue_req.sbale_curr = 2;
+	req->qdio_req.sbale_curr = 2;
 
 	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
 	if (!sr_buf) {
@@ -834,7 +828,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	}
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
-	sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
 	sbale->addr = (void *) sr_buf;
 	sbale->length = sizeof(*sr_buf);
 
@@ -881,13 +875,11 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_port_boxed(unit->port, "fsafch3", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_unit_boxed(unit, "fsafch4", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (fsq->word[0]) {
@@ -933,7 +925,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
 				ZFCP_STATUS_COMMON_UNBLOCKED)))
 		goto out_error_free;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -958,10 +950,10 @@ out:
 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
-	struct zfcp_send_ct *send_ct = req->data;
+	struct zfcp_fsf_ct_els *ct = req->data;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
-	send_ct->status = -EINVAL;
+	ct->status = -EINVAL;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto skip_fsfstatus;
@@ -969,7 +961,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 	switch (header->fsf_status) {
 	case FSF_GOOD:
 		zfcp_dbf_san_ct_response(req);
-		send_ct->status = 0;
+		ct->status = 0;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
 		zfcp_fsf_class_not_supp(req);
@@ -985,8 +977,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 	case FSF_ACCESS_DENIED:
 		break;
 	case FSF_PORT_BOXED:
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_HANDLE_NOT_VALID:
 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
@@ -1001,8 +992,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 	}
 
 skip_fsfstatus:
-	if (send_ct->handler)
-		send_ct->handler(send_ct->handler_data);
+	if (ct->handler)
+		ct->handler(ct->handler_data);
 }
 
 static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
@@ -1029,7 +1020,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
-							       &req->queue_req);
+							       &req->qdio_req);
 	u32 feat = adapter->adapter_features;
 	int bytes;
 
@@ -1047,15 +1038,15 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 		return 0;
 	}
 
-	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
 					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_req, max_sbals);
 	if (bytes <= 0)
 		return -EIO;
 	req->qtcb->bottom.support.req_buf_length = bytes;
-	req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+	req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
 
-	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
 					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_resp, max_sbals);
 	req->qtcb->bottom.support.resp_buf_length = bytes;
@@ -1068,7 +1059,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
 				 struct scatterlist *sg_req,
 				 struct scatterlist *sg_resp,
-				 int max_sbals)
+				 int max_sbals, unsigned int timeout)
 {
 	int ret;
 
@@ -1077,9 +1068,11 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
 		return ret;
 
 	/* common settings for ct/gs and els requests */
+	if (timeout > 255)
+		timeout = 255; /* max value accepted by hardware */
 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
-	req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
-	zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ);
+	req->qtcb->bottom.support.timeout = timeout;
+	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
 
 	return 0;
 }
@@ -1089,9 +1082,10 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
  * @ct: pointer to struct zfcp_send_ct with data for request
  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
  */
-int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
+int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
+		     unsigned int timeout)
 {
-	struct zfcp_wka_port *wka_port = ct->wka_port;
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int ret = -EIO;
@@ -1109,7 +1103,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
-				    FSF_MAX_SBALS_PER_REQ);
+				    FSF_MAX_SBALS_PER_REQ, timeout);
 	if (ret)
 		goto failed_send;
 
@@ -1117,7 +1111,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
 	req->qtcb->header.port_handle = wka_port->handle;
 	req->data = ct;
 
-	zfcp_dbf_san_ct_request(req);
+	zfcp_dbf_san_ct_request(req, wka_port->d_id);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1134,7 +1128,7 @@ out:
 
 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 {
-	struct zfcp_send_els *send_els = req->data;
+	struct zfcp_fsf_ct_els *send_els = req->data;
 	struct zfcp_port *port = send_els->port;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
@@ -1154,9 +1148,6 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]){
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			if (port && (send_els->ls_code != ZFCP_LS_ADISC))
-				zfcp_fc_test_link(port);
-			/*fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 		case FSF_SQ_RETRY_IF_POSSIBLE:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1188,10 +1179,11 @@ skip_fsfstatus:
  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
  * @els: pointer to struct zfcp_send_els with data for the command
  */
-int zfcp_fsf_send_els(struct zfcp_send_els *els)
+int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
 {
 	struct zfcp_fsf_req *req;
-	struct zfcp_qdio *qdio = els->adapter->qdio;
+	struct zfcp_qdio *qdio = adapter->qdio;
 	int ret = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
@@ -1206,12 +1198,12 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2);
+	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout);
 
 	if (ret)
 		goto failed_send;
 
-	req->qtcb->bottom.support.d_id = els->d_id;
+	hton24(req->qtcb->bottom.support.d_id, d_id);
 	req->handler = zfcp_fsf_send_els_handler;
 	req->data = els;
 
@@ -1250,7 +1242,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1261,13 +1253,13 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 				FSF_FEATURE_UPDATE_ALERT;
 	req->erp_action = erp_action;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1292,7 +1284,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 		goto out_unlock;
 	}
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1348,19 +1340,19 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_exchange_port_data_handler;
 	req->erp_action = erp_action;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1397,7 +1389,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 	if (data)
 		req->data = data;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1422,7 +1414,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 {
 	struct zfcp_port *port = req->data;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
-	struct fsf_plogi *plogi;
+	struct fc_els_flogi *plogi;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto out;
@@ -1472,23 +1464,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 		 * another GID_PN straight after a port has been opened.
 		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
 		 */
-		plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
+		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
 		if (req->qtcb->bottom.support.els1_length >=
-		    FSF_PLOGI_MIN_LEN) {
-			if (plogi->serv_param.wwpn != port->wwpn) {
-				port->d_id = 0;
-				dev_warn(&port->adapter->ccw_device->dev,
-					 "A port opened with WWPN 0x%016Lx "
-					 "returned data that identifies it as "
-					 "WWPN 0x%016Lx\n",
-					 (unsigned long long) port->wwpn,
-					 (unsigned long long)
-					 plogi->serv_param.wwpn);
-			} else {
-				port->wwnn = plogi->serv_param.wwnn;
-				zfcp_fc_plogi_evaluate(port, plogi);
-			}
-		}
+		    FSF_PLOGI_MIN_LEN)
+			zfcp_fc_plogi_evaluate(port, plogi);
 		break;
 	case FSF_UNKNOWN_OP_SUBTYPE:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1496,7 +1475,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 	}
 
 out:
-	zfcp_port_put(port);
+	put_device(&port->dev);
 }
 
 /**
@@ -1525,23 +1504,23 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_open_port_handler;
-	req->qtcb->bottom.support.d_id = port->d_id;
+	hton24(req->qtcb->bottom.support.d_id, port->d_id);
 	req->data = port;
 	req->erp_action = erp_action;
-	erp_action->fsf_req = req;
-	zfcp_port_get(port);
+	erp_action->fsf_req_id = req->req_id;
+	get_device(&port->dev);
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
-		zfcp_port_put(port);
+		erp_action->fsf_req_id = 0;
+		put_device(&port->dev);
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1595,7 +1574,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1603,13 +1582,13 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 	req->data = erp_action->port;
 	req->erp_action = erp_action;
 	req->qtcb->header.port_handle = erp_action->port->handle;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1618,11 +1597,11 @@ out:
 
 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
 {
-	struct zfcp_wka_port *wka_port = req->data;
+	struct zfcp_fc_wka_port *wka_port = req->data;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 		goto out;
 	}
 
@@ -1635,13 +1614,13 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		/* fall through */
 	case FSF_ACCESS_DENIED:
-		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 		break;
 	case FSF_GOOD:
 		wka_port->handle = header->port_handle;
 		/* fall through */
 	case FSF_PORT_ALREADY_OPEN:
-		wka_port->status = ZFCP_WKA_PORT_ONLINE;
+		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
 	}
 out:
 	wake_up(&wka_port->completion_wq);
@@ -1649,10 +1628,10 @@ out:
 
 /**
  * zfcp_fsf_open_wka_port - create and send open wka-port request
- * @wka_port: pointer to struct zfcp_wka_port
+ * @wka_port: pointer to struct zfcp_fc_wka_port
  * Returns: 0 on success, error otherwise
 */
-int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
+int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1672,12 +1651,12 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_open_wka_port_handler;
-	req->qtcb->bottom.support.d_id = wka_port->d_id;
+	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
 	req->data = wka_port;
 
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1691,23 +1670,23 @@ out:
 
 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 {
-	struct zfcp_wka_port *wka_port = req->data;
+	struct zfcp_fc_wka_port *wka_port = req->data;
 
 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
 	}
 
-	wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 	wake_up(&wka_port->completion_wq);
 }
 
 /**
 * zfcp_fsf_close_wka_port - create and send close wka port request
- * @erp_action: pointer to struct zfcp_erp_action
+ * @wka_port: WKA port to close
 * Returns: 0 on success, error otherwise
 */
-int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
+int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1727,7 +1706,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1765,13 +1744,13 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		list_for_each_entry(unit, &port->unit_list_head, list)
+		read_lock(&port->unit_list_lock);
+		list_for_each_entry(unit, &port->unit_list, list)
 			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
 					  &unit->status);
+		read_unlock(&port->unit_list_lock);
 		zfcp_erp_port_boxed(port, "fscpph2", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
-
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
@@ -1787,9 +1766,11 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
 		 */
 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		list_for_each_entry(unit, &port->unit_list_head, list)
+		read_lock(&port->unit_list_lock);
+		list_for_each_entry(unit, &port->unit_list, list)
 			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
 					  &unit->status);
+		read_unlock(&port->unit_list_lock);
 		break;
 	}
 }
@@ -1819,7 +1800,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1827,13 +1808,13 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 	req->qtcb->header.port_handle = erp_action->port->handle;
 	req->erp_action = erp_action;
 	req->handler = zfcp_fsf_close_physical_port_handler;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -1873,8 +1854,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_SHARING_VIOLATION:
 		if (header->fsf_status_qual.word[0])
@@ -1993,7 +1973,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2002,7 +1982,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 	req->handler = zfcp_fsf_open_unit_handler;
 	req->data = erp_action->unit;
 	req->erp_action = erp_action;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
@@ -2011,7 +1991,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -2036,8 +2016,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
@@ -2080,7 +2059,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2089,13 +2068,13 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 	req->handler = zfcp_fsf_close_unit_handler;
 	req->data = erp_action->unit;
 	req->erp_action = erp_action;
-	erp_action->fsf_req = req;
+	erp_action->fsf_req_id = req->req_id;
 
 	zfcp_fsf_start_erp_timer(req);
 	retval = zfcp_fsf_req_send(req);
 	if (retval) {
 		zfcp_fsf_req_free(req);
-		erp_action->fsf_req = NULL;
+		erp_action->fsf_req_id = 0;
 	}
 out:
 	spin_unlock_bh(&qdio->req_q_lock);
@@ -2109,72 +2088,58 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
 	lat_rec->max = max(lat_rec->max, lat);
 }
 
-static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
+static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 {
-	struct fsf_qual_latency_info *lat_inf;
-	struct latency_cont *lat;
+	struct fsf_qual_latency_info *lat_in;
+	struct latency_cont *lat = NULL;
 	struct zfcp_unit *unit = req->unit;
+	struct zfcp_blk_drv_data blktrc;
+	int ticks = req->adapter->timer_ticks;
 
-	lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
+	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
 
-	switch (req->qtcb->bottom.io.data_direction) {
-	case FSF_DATADIR_READ:
-		lat = &unit->latencies.read;
-		break;
-	case FSF_DATADIR_WRITE:
-		lat = &unit->latencies.write;
-		break;
-	case FSF_DATADIR_CMND:
-		lat = &unit->latencies.cmd;
-		break;
-	default:
-		return;
-	}
-
-	spin_lock(&unit->latencies.lock);
-	zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
-	zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
-	lat->counter++;
-	spin_unlock(&unit->latencies.lock);
-}
-
-#ifdef CONFIG_BLK_DEV_IO_TRACE
-static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
-{
-	struct fsf_qual_latency_info *lat_inf;
-	struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
-	struct request *req = scsi_cmnd->request;
-	struct zfcp_blk_drv_data trace;
-	int ticks = fsf_req->adapter->timer_ticks;
+	blktrc.flags = 0;
+	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
+	blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
+	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
+
+	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		blktrc.flags |= ZFCP_BLK_LAT_VALID;
+		blktrc.channel_lat = lat_in->channel_lat * ticks;
+		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+
+		switch (req->qtcb->bottom.io.data_direction) {
+		case FSF_DATADIR_READ:
+			lat = &unit->latencies.read;
+			break;
+		case FSF_DATADIR_WRITE:
+			lat = &unit->latencies.write;
+			break;
+		case FSF_DATADIR_CMND:
+			lat = &unit->latencies.cmd;
+			break;
+		}
 
-	trace.flags = 0;
-	trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
-	if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
-		trace.flags |= ZFCP_BLK_LAT_VALID;
-		lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
-		trace.channel_lat = lat_inf->channel_lat * ticks;
-		trace.fabric_lat = lat_inf->fabric_lat * ticks;
+		if (lat) {
+			spin_lock(&unit->latencies.lock);
+			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
+			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
+			lat->counter++;
+			spin_unlock(&unit->latencies.lock);
+		}
 	}
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
-		trace.flags |= ZFCP_BLK_REQ_ERROR;
-	trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
-	trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
 
-	blk_add_driver_data(req->q, req, &trace, sizeof(trace));
-}
-#else
-static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
-{
+	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
+			    sizeof(blktrc));
 }
-#endif
 
 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 {
 	struct scsi_cmnd *scpnt;
-	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
-		&(req->qtcb->bottom.io.fcp_rsp);
-	u32 sns_len;
-	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
+	struct fcp_resp_with_ext *fcp_rsp;
 	unsigned long flags;
 
 	read_lock_irqsave(&req->adapter->abort_lock, flags);
@@ -2185,57 +2150,17 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 		return;
 	}
 
-	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
-		set_host_byte(scpnt, DID_SOFT_ERROR);
-		goto skip_fsfstatus;
-	}
-
 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
-		set_host_byte(scpnt, DID_ERROR);
+		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
 		goto skip_fsfstatus;
 	}
 
-	set_msg_byte(scpnt, COMMAND_COMPLETE);
-
-	scpnt->result |= fcp_rsp_iu->scsi_status;
-
-	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
-		zfcp_fsf_req_latency(req);
-
-	zfcp_fsf_trace_latency(req);
-
-	if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
-		if (fcp_rsp_info[3] == RSP_CODE_GOOD)
-			set_host_byte(scpnt, DID_OK);
-		else {
-			set_host_byte(scpnt, DID_ERROR);
-			goto skip_fsfstatus;
-		}
-	}
-
-	if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
-		sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
-			  fcp_rsp_iu->fcp_rsp_len;
-		sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
-		sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
-
-		memcpy(scpnt->sense_buffer,
-		       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
-	}
+	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
 
-	if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
-		scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
-		if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
-		     scpnt->underflow)
-			set_host_byte(scpnt, DID_ERROR);
-	}
 skip_fsfstatus:
-	if (scpnt->result != 0)
-		zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
-	else if (scpnt->retries > 0)
-		zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
-	else
-		zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
+	zfcp_fsf_req_trace(req, scpnt);
+	zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
 
 	scpnt->host_scribble = NULL;
 	(scpnt->scsi_done) (scpnt);
@@ -2250,11 +2175,13 @@ skip_fsfstatus:
 
 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
 {
-	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
-	    &(req->qtcb->bottom.io.fcp_rsp);
-	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
+	struct fcp_resp_with_ext *fcp_rsp;
+	struct fcp_resp_rsp_info *rsp_info;
+
+	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
+	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
 
-	if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
+	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
 }
@@ -2314,13 +2241,11 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_port_boxed(unit->port, "fssfch5", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_unit_boxed(unit, "fssfch6", req);
-		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			       ZFCP_STATUS_FSFREQ_RETRY;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		if (header->fsf_status_qual.word[0] ==
@@ -2335,24 +2260,10 @@ skip_fsfstatus:
 	else {
 		zfcp_fsf_send_fcp_command_task_handler(req);
 		req->unit = NULL;
-		zfcp_unit_put(unit);
+		put_device(&unit->dev);
 	}
 }
 
-static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
-{
-	u32 *fcp_dl_ptr;
-
-	/*
-	 * fcp_dl_addr = start address of fcp_cmnd structure +
-	 * size of fixed part + size of dynamically sized add_dcp_cdb field
-	 * SEE FCP-2 documentation
-	 */
-	fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
-			      (fcp_cmd->add_fcp_cdb_length << 2));
-	*fcp_dl_ptr = fcp_dl;
-}
-
 /**
  * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
  * @unit: unit where command is sent to
@@ -2362,7 +2273,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 			       struct scsi_cmnd *scsi_cmnd)
 {
 	struct zfcp_fsf_req *req;
-	struct fcp_cmnd_iu *fcp_cmnd_iu;
+	struct fcp_cmnd *fcp_cmnd;
 	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
 	int real_bytes, retval = -EIO;
 	struct zfcp_adapter *adapter = unit->port->adapter;
@@ -2387,23 +2298,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	zfcp_unit_get(unit);
+	get_device(&unit->dev);
 	req->unit = unit;
 	req->data = scsi_cmnd;
 	req->handler = zfcp_fsf_send_fcp_command_handler;
 	req->qtcb->header.lun_handle = unit->handle;
 	req->qtcb->header.port_handle = unit->port->handle;
 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
 
 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
 
-	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
-	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
 	/*
 	 * set depending on data direction:
 	 *	data direction bits in SBALE (SB Type)
 	 *	data direction bits in QTCB
-	 *	data direction bits in FCP_CMND IU
 	 */
 	switch (scsi_cmnd->sc_data_direction) {
 	case DMA_NONE:
@@ -2411,38 +2320,23 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2411 break; 2320 break;
2412 case DMA_FROM_DEVICE: 2321 case DMA_FROM_DEVICE:
2413 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; 2322 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2414 fcp_cmnd_iu->rddata = 1;
2415 break; 2323 break;
2416 case DMA_TO_DEVICE: 2324 case DMA_TO_DEVICE:
2417 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; 2325 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2418 sbtype = SBAL_FLAGS0_TYPE_WRITE; 2326 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2419 fcp_cmnd_iu->wddata = 1;
2420 break; 2327 break;
2421 case DMA_BIDIRECTIONAL: 2328 case DMA_BIDIRECTIONAL:
2422 goto failed_scsi_cmnd; 2329 goto failed_scsi_cmnd;
2423 } 2330 }
2424 2331
2425 if (likely((scsi_cmnd->device->simple_tags) || 2332 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2426 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) && 2333 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2427 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
2428 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
2429 else
2430 fcp_cmnd_iu->task_attribute = UNTAGGED;
2431
2432 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
2433 fcp_cmnd_iu->add_fcp_cdb_length =
2434 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
2435 2334
2436 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 2335 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
2437
2438 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2439 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2440
2441 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
2442 scsi_sglist(scsi_cmnd), 2336 scsi_sglist(scsi_cmnd),
2443 FSF_MAX_SBALS_PER_REQ); 2337 FSF_MAX_SBALS_PER_REQ);
2444 if (unlikely(real_bytes < 0)) { 2338 if (unlikely(real_bytes < 0)) {
2445 if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { 2339 if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2446 dev_err(&adapter->ccw_device->dev, 2340 dev_err(&adapter->ccw_device->dev,
2447 "Oversize data package, unit 0x%016Lx " 2341 "Oversize data package, unit 0x%016Lx "
2448 "on port 0x%016Lx closed\n", 2342 "on port 0x%016Lx closed\n",
@@ -2454,8 +2348,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2454 goto failed_scsi_cmnd; 2348 goto failed_scsi_cmnd;
2455 } 2349 }
2456 2350
2457 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
2458
2459 retval = zfcp_fsf_req_send(req); 2351 retval = zfcp_fsf_req_send(req);
2460 if (unlikely(retval)) 2352 if (unlikely(retval))
2461 goto failed_scsi_cmnd; 2353 goto failed_scsi_cmnd;
@@ -2463,7 +2355,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2463 goto out; 2355 goto out;
2464 2356
2465failed_scsi_cmnd: 2357failed_scsi_cmnd:
2466 zfcp_unit_put(unit); 2358 put_device(&unit->dev);
2467 zfcp_fsf_req_free(req); 2359 zfcp_fsf_req_free(req);
2468 scsi_cmnd->host_scribble = NULL; 2360 scsi_cmnd->host_scribble = NULL;
2469out: 2361out:
@@ -2481,7 +2373,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2481{ 2373{
2482 struct qdio_buffer_element *sbale; 2374 struct qdio_buffer_element *sbale;
2483 struct zfcp_fsf_req *req = NULL; 2375 struct zfcp_fsf_req *req = NULL;
2484 struct fcp_cmnd_iu *fcp_cmnd_iu; 2376 struct fcp_cmnd *fcp_cmnd;
2485 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 2377 struct zfcp_qdio *qdio = unit->port->adapter->qdio;
2486 2378
2487 if (unlikely(!(atomic_read(&unit->status) & 2379 if (unlikely(!(atomic_read(&unit->status) &
@@ -2507,16 +2399,14 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2507 req->qtcb->header.port_handle = unit->port->handle; 2399 req->qtcb->header.port_handle = unit->port->handle;
2508 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2400 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2509 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2401 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2510 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2402 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2511 sizeof(u32);
2512 2403
2513 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2404 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
2514 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; 2405 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2515 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2406 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2516 2407
2517 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd; 2408 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2518 fcp_cmnd_iu->fcp_lun = unit->fcp_lun; 2409 zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
2519 fcp_cmnd_iu->task_management_flags = tm_flags;
2520 2410
2521 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 2411 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2522 if (!zfcp_fsf_req_send(req)) 2412 if (!zfcp_fsf_req_send(req))
@@ -2574,14 +2464,14 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2574 2464
2575 req->handler = zfcp_fsf_control_file_handler; 2465 req->handler = zfcp_fsf_control_file_handler;
2576 2466
2577 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2467 sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
2578 sbale[0].flags |= direction; 2468 sbale[0].flags |= direction;
2579 2469
2580 bottom = &req->qtcb->bottom.support; 2470 bottom = &req->qtcb->bottom.support;
2581 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; 2471 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2582 bottom->option = fsf_cfdc->option; 2472 bottom->option = fsf_cfdc->option;
2583 2473
2584 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, 2474 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2585 direction, fsf_cfdc->sg, 2475 direction, fsf_cfdc->sg,
2586 FSF_MAX_SBALS_PER_REQ); 2476 FSF_MAX_SBALS_PER_REQ);
2587 if (bytes != ZFCP_CFDC_MAX_SIZE) { 2477 if (bytes != ZFCP_CFDC_MAX_SIZE) {
@@ -2612,15 +2502,14 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2612 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; 2502 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
2613 struct qdio_buffer_element *sbale; 2503 struct qdio_buffer_element *sbale;
2614 struct zfcp_fsf_req *fsf_req; 2504 struct zfcp_fsf_req *fsf_req;
2615 unsigned long flags, req_id; 2505 unsigned long req_id;
2616 int idx; 2506 int idx;
2617 2507
2618 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { 2508 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2619 2509
2620 sbale = &sbal->element[idx]; 2510 sbale = &sbal->element[idx];
2621 req_id = (unsigned long) sbale->addr; 2511 req_id = (unsigned long) sbale->addr;
2622 spin_lock_irqsave(&adapter->req_list_lock, flags); 2512 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2623 fsf_req = zfcp_reqlist_find(adapter, req_id);
2624 2513
2625 if (!fsf_req) 2514 if (!fsf_req)
2626 /* 2515 /*
@@ -2630,11 +2519,8 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2630 panic("error: unknown req_id (%lx) on adapter %s.\n", 2519 panic("error: unknown req_id (%lx) on adapter %s.\n",
2631 req_id, dev_name(&adapter->ccw_device->dev)); 2520 req_id, dev_name(&adapter->ccw_device->dev));
2632 2521
2633 list_del(&fsf_req->list); 2522 fsf_req->qdio_req.sbal_response = sbal_idx;
2634 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 2523 fsf_req->qdio_req.qdio_inb_usage =
2635
2636 fsf_req->queue_req.sbal_response = sbal_idx;
2637 fsf_req->queue_req.qdio_inb_usage =
2638 atomic_read(&qdio->resp_q.count); 2524 atomic_read(&qdio->resp_q.count);
2639 zfcp_fsf_req_complete(fsf_req); 2525 zfcp_fsf_req_complete(fsf_req);
2640 2526
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index dcc7c1dbcf58..b3de682b64cf 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/pfn.h> 12#include <linux/pfn.h>
13#include <linux/scatterlist.h> 13#include <linux/scatterlist.h>
14#include <scsi/libfc.h>
14 15
15#define FSF_QTCB_CURRENT_VERSION 0x00000001 16#define FSF_QTCB_CURRENT_VERSION 0x00000001
16 17
@@ -228,7 +229,8 @@ struct fsf_status_read_buffer {
228 u32 length; 229 u32 length;
229 u32 res1; 230 u32 res1;
230 struct fsf_queue_designator queue_designator; 231 struct fsf_queue_designator queue_designator;
231 u32 d_id; 232 u8 res2;
233 u8 d_id[3];
232 u32 class; 234 u32 class;
233 u64 fcp_lun; 235 u64 fcp_lun;
234 u8 res3[24]; 236 u8 res3[24];
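The d_id fields change from u32 to a three-byte array because FC destination IDs are 24 bits on the wire, stored big-endian. A minimal conversion sketch; the helper names are invented for illustration:

#include <linux/types.h>

/* Hypothetical helpers for the 24-bit, big-endian d_id encoding. */
static inline u32 example_d_id_to_u32(const u8 d_id[3])
{
        return (d_id[0] << 16) | (d_id[1] << 8) | d_id[2];
}

static inline void example_u32_to_d_id(u8 d_id[3], u32 addr)
{
        d_id[0] = (addr >> 16) & 0xff;
        d_id[1] = (addr >> 8) & 0xff;
        d_id[2] = addr & 0xff;
}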
@@ -309,22 +311,7 @@ struct fsf_qtcb_header {
309 u8 res4[16]; 311 u8 res4[16];
310} __attribute__ ((packed)); 312} __attribute__ ((packed));
311 313
312struct fsf_nport_serv_param {
313 u8 common_serv_param[16];
314 u64 wwpn;
315 u64 wwnn;
316 u8 class1_serv_param[16];
317 u8 class2_serv_param[16];
318 u8 class3_serv_param[16];
319 u8 class4_serv_param[16];
320 u8 vendor_version_level[16];
321} __attribute__ ((packed));
322
323#define FSF_PLOGI_MIN_LEN 112 314#define FSF_PLOGI_MIN_LEN 112
324struct fsf_plogi {
325 u32 code;
326 struct fsf_nport_serv_param serv_param;
327} __attribute__ ((packed));
328 315
329#define FSF_FCP_CMND_SIZE 288 316#define FSF_FCP_CMND_SIZE 288
330#define FSF_FCP_RSP_SIZE 128 317#define FSF_FCP_RSP_SIZE 128
@@ -342,8 +329,8 @@ struct fsf_qtcb_bottom_io {
342 329
343struct fsf_qtcb_bottom_support { 330struct fsf_qtcb_bottom_support {
344 u32 operation_subtype; 331 u32 operation_subtype;
345 u8 res1[12]; 332 u8 res1[13];
346 u32 d_id; 333 u8 d_id[3];
347 u32 option; 334 u32 option;
348 u64 fcp_lun; 335 u64 fcp_lun;
349 u64 res2; 336 u64 res2;
@@ -372,18 +359,18 @@ struct fsf_qtcb_bottom_config {
372 u32 fc_topology; 359 u32 fc_topology;
373 u32 fc_link_speed; 360 u32 fc_link_speed;
374 u32 adapter_type; 361 u32 adapter_type;
375 u32 peer_d_id; 362 u8 res0;
363 u8 peer_d_id[3];
376 u8 res1[2]; 364 u8 res1[2];
377 u16 timer_interval; 365 u16 timer_interval;
378 u8 res2[8]; 366 u8 res2[9];
379 u32 s_id; 367 u8 s_id[3];
380 struct fsf_nport_serv_param nport_serv_param; 368 u8 nport_serv_param[128];
381 u8 reserved_nport_serv_param[16];
382 u8 res3[8]; 369 u8 res3[8];
383 u32 adapter_ports; 370 u32 adapter_ports;
384 u32 hardware_version; 371 u32 hardware_version;
385 u8 serial_number[32]; 372 u8 serial_number[32];
386 struct fsf_nport_serv_param plogi_payload; 373 u8 plogi_payload[112];
387 struct fsf_statistics_info stat_info; 374 struct fsf_statistics_info stat_info;
388 u8 res4[112]; 375 u8 res4[112];
389} __attribute__ ((packed)); 376} __attribute__ ((packed));
@@ -450,4 +437,22 @@ struct zfcp_blk_drv_data {
450 u64 fabric_lat; 437 u64 fabric_lat;
451} __attribute__ ((packed)); 438} __attribute__ ((packed));
452 439
440/**
441 * struct zfcp_fsf_ct_els - zfcp data for ct or els request
442 * @req: scatter-gather list for request
443 * @resp: scatter-gather list for response
444 * @handler: handler function (called for response to the request)
445 * @handler_data: data passed to handler function
446 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
447 * @status: used to pass error status to calling function
448 */
449struct zfcp_fsf_ct_els {
450 struct scatterlist *req;
451 struct scatterlist *resp;
452 void (*handler)(void *);
453 void *handler_data;
454 struct zfcp_port *port;
455 int status;
456};
457
453#endif /* FSF_H */ 458#endif /* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6c5228b627fc..dbfa312a7f50 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -9,7 +9,9 @@
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/slab.h>
12#include "zfcp_ext.h" 13#include "zfcp_ext.h"
14#include "zfcp_qdio.h"
13 15
14#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) 16#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
15 17
@@ -28,12 +30,6 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
28 return 0; 30 return 0;
29} 31}
30 32
31static struct qdio_buffer_element *
32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
33{
34 return &q->sbal[sbal_idx]->element[sbale_idx];
35}
36
37static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) 33static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
38{ 34{
39 struct zfcp_adapter *adapter = qdio->adapter; 35 struct zfcp_adapter *adapter = qdio->adapter;
@@ -106,7 +102,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
106 102
107 if (unlikely(retval)) { 103 if (unlikely(retval)) {
108 atomic_set(&queue->count, count); 104 atomic_set(&queue->count, count);
109 /* FIXME: Recover this with an adapter reopen? */ 105 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
110 } else { 106 } else {
111 queue->first += count; 107 queue->first += count;
112 queue->first %= QDIO_MAX_BUFFERS_PER_Q; 108 queue->first %= QDIO_MAX_BUFFERS_PER_Q;
@@ -145,32 +141,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
145 zfcp_qdio_resp_put_back(qdio, count); 141 zfcp_qdio_resp_put_back(qdio, count);
146} 142}
147 143
148/**
149 * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
150 * @qdio: pointer to struct zfcp_qdio
151 * @q_rec: pointer to struct zfcp_queue_rec
152 * Returns: pointer to qdio_buffer_element (SBALE) structure
153 */
154struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
155 struct zfcp_queue_req *q_req)
156{
157 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
158}
159
160/**
161 * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
162 * @fsf_req: pointer to struct fsf_req
163 * Returns: pointer to qdio_buffer_element (SBALE) structure
164 */
165struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
166 struct zfcp_queue_req *q_req)
167{
168 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
169 q_req->sbale_curr);
170}
171
172static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, 144static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
173 struct zfcp_queue_req *q_req, int max_sbals) 145 struct zfcp_qdio_req *q_req, int max_sbals)
174{ 146{
175 int count = atomic_read(&qdio->req_q.count); 147 int count = atomic_read(&qdio->req_q.count);
176 count = min(count, max_sbals); 148 count = min(count, max_sbals);
@@ -179,7 +151,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
179} 151}
180 152
181static struct qdio_buffer_element * 153static struct qdio_buffer_element *
182zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, 154zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
183 unsigned long sbtype) 155 unsigned long sbtype)
184{ 156{
185 struct qdio_buffer_element *sbale; 157 struct qdio_buffer_element *sbale;
@@ -214,7 +186,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
214} 186}
215 187
216static struct qdio_buffer_element * 188static struct qdio_buffer_element *
217zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, 189zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
218 unsigned int sbtype) 190 unsigned int sbtype)
219{ 191{
220 if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 192 if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -224,7 +196,7 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
224} 196}
225 197
226static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, 198static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
227 struct zfcp_queue_req *q_req) 199 struct zfcp_qdio_req *q_req)
228{ 200{
229 struct qdio_buffer **sbal = qdio->req_q.sbal; 201 struct qdio_buffer **sbal = qdio->req_q.sbal;
230 int first = q_req->sbal_first; 202 int first = q_req->sbal_first;
@@ -235,7 +207,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
235} 207}
236 208
237static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, 209static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
238 struct zfcp_queue_req *q_req, 210 struct zfcp_qdio_req *q_req,
239 unsigned int sbtype, void *start_addr, 211 unsigned int sbtype, void *start_addr,
240 unsigned int total_length) 212 unsigned int total_length)
241{ 213{
@@ -271,8 +243,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
271 * @max_sbals: upper bound for number of SBALs to be used 243 * @max_sbals: upper bound for number of SBALs to be used
272 * Returns: number of bytes, or error (negative) 244 * Returns: number of bytes, or error (negative)
273 */ 245 */
274int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, 246int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
275 struct zfcp_queue_req *q_req,
276 unsigned long sbtype, struct scatterlist *sg, 247 unsigned long sbtype, struct scatterlist *sg,
277 int max_sbals) 248 int max_sbals)
278{ 249{
@@ -304,10 +275,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
304/** 275/**
305 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO 276 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
306 * @qdio: pointer to struct zfcp_qdio 277 * @qdio: pointer to struct zfcp_qdio
307 * @q_req: pointer to struct zfcp_queue_req 278 * @q_req: pointer to struct zfcp_qdio_req
308 * Returns: 0 on success, error otherwise 279 * Returns: 0 on success, error otherwise
309 */ 280 */
310int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) 281int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
311{ 282{
312 struct zfcp_qdio_queue *req_q = &qdio->req_q; 283 struct zfcp_qdio_queue *req_q = &qdio->req_q;
313 int first = q_req->sbal_first; 284 int first = q_req->sbal_first;
@@ -349,8 +320,6 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
349 id->input_handler = zfcp_qdio_int_resp; 320 id->input_handler = zfcp_qdio_int_resp;
350 id->output_handler = zfcp_qdio_int_req; 321 id->output_handler = zfcp_qdio_int_req;
351 id->int_parm = (unsigned long) qdio; 322 id->int_parm = (unsigned long) qdio;
352 id->flags = QDIO_INBOUND_0COPY_SBALS |
353 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
354 id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); 323 id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
355 id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); 324 id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
356 325
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 000000000000..8cca54631e1e
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,109 @@
1/*
2 * zfcp device driver
3 *
4 * Header file for zfcp qdio interface
5 *
6 * Copyright IBM Corporation 2010
7 */
8
9#ifndef ZFCP_QDIO_H
10#define ZFCP_QDIO_H
11
12#include <asm/qdio.h>
13
14/**
15 * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
16 * @sbal: qdio buffers
17 * @first: index of next free buffer in queue
18 * @count: number of free buffers in queue
19 */
20struct zfcp_qdio_queue {
21 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
22 u8 first;
23 atomic_t count;
24};
25
26/**
27 * struct zfcp_qdio - basic qdio data structure
28 * @resp_q: response queue
29 * @req_q: request queue
30 * @stat_lock: lock to protect req_q_util and req_q_time
31 * @req_q_lock: lock to serialize access to request queue
32 * @req_q_time: time of last fill level change
33 * @req_q_util: used for accounting
34 * @req_q_full: queue full incidents
35 * @req_q_wq: used to wait for SBAL availability
36 * @adapter: adapter used in conjunction with this qdio structure
37 */
38struct zfcp_qdio {
39 struct zfcp_qdio_queue resp_q;
40 struct zfcp_qdio_queue req_q;
41 spinlock_t stat_lock;
42 spinlock_t req_q_lock;
43 unsigned long long req_q_time;
44 u64 req_q_util;
45 atomic_t req_q_full;
46 wait_queue_head_t req_q_wq;
47 struct zfcp_adapter *adapter;
48};
49
50/**
51 * struct zfcp_qdio_req - qdio queue related values for a request
52 * @sbal_number: number of free sbals
53 * @sbal_first: first sbal for this request
54 * @sbal_last: last sbal for this request
55 * @sbal_limit: last possible sbal for this request
56 * @sbale_curr: current sbale at creation of this request
57 * @sbal_response: sbal used in interrupt
58 * @qdio_outb_usage: usage of outbound queue
59 * @qdio_inb_usage: usage of inbound queue
60 */
61struct zfcp_qdio_req {
62 u8 sbal_number;
63 u8 sbal_first;
64 u8 sbal_last;
65 u8 sbal_limit;
66 u8 sbale_curr;
67 u8 sbal_response;
68 u16 qdio_outb_usage;
69 u16 qdio_inb_usage;
70};
71
72/**
73 * zfcp_qdio_sbale - return pointer to sbale in qdio queue
74 * @q: queue where to find sbal
75 * @sbal_idx: sbal index in queue
76 * @sbale_idx: sbale index in sbal
77 */
78static inline struct qdio_buffer_element *
79zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
80{
81 return &q->sbal[sbal_idx]->element[sbale_idx];
82}
83
84/**
85 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
86 * @qdio: pointer to struct zfcp_qdio
87 * @q_req: pointer to struct zfcp_qdio_req
88 * Returns: pointer to qdio_buffer_element (sbale) structure
89 */
90static inline struct qdio_buffer_element *
91zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
92{
93 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
94}
95
96/**
97 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
98 * @qdio: pointer to struct zfcp_qdio
99 * @q_req: pointer to struct zfcp_qdio_req
100 * Returns: pointer to qdio_buffer_element (sbale) structure
101 */
102static inline struct qdio_buffer_element *
103zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
104{
105 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
106 q_req->sbale_curr);
107}
108
109#endif /* ZFCP_QDIO_H */
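To make the new inline helpers concrete, a sketch of how a caller reaches the sbales of a request; the function is hypothetical and assumes a q_req already set up by the request path in zfcp_fsf.c:

#include "zfcp_qdio.h"

/* Sketch only: peek at the sbales a pending request occupies. */
static void example_peek_req(struct zfcp_qdio *qdio,
                             struct zfcp_qdio_req *q_req)
{
        struct qdio_buffer_element *sbale;

        /* first sbale of the request's last sbal */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);

        /* sbale the fill code would write to next */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
        (void) sbale;   /* a real caller sets addr/length here */
}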
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 000000000000..a72d1b730aba
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,183 @@
1/*
2 * zfcp device driver
3 *
4 * Data structure and helper functions for tracking pending FSF
5 * requests.
6 *
7 * Copyright IBM Corporation 2009
8 */
9
10#ifndef ZFCP_REQLIST_H
11#define ZFCP_REQLIST_H
12
13/* number of hash buckets */
14#define ZFCP_REQ_LIST_BUCKETS 128
15
16/**
17 * struct zfcp_reqlist - Container for request list (reqlist)
18 * @lock: Spinlock for protecting the hash list
19 * @buckets: Array of hashbuckets, each is a list of requests in this bucket
20 */
21struct zfcp_reqlist {
22 spinlock_t lock;
23 struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
24};
25
26static inline int zfcp_reqlist_hash(unsigned long req_id)
27{
28 return req_id % ZFCP_REQ_LIST_BUCKETS;
29}
30
31/**
32 * zfcp_reqlist_alloc - Allocate and initialize reqlist
33 *
34 * Returns pointer to allocated reqlist on success, or NULL on
35 * allocation failure.
36 */
37static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
38{
39 unsigned int i;
40 struct zfcp_reqlist *rl;
41
42 rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
43 if (!rl)
44 return NULL;
45
46 spin_lock_init(&rl->lock);
47
48 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
49 INIT_LIST_HEAD(&rl->buckets[i]);
50
51 return rl;
52}
53
54/**
55 * zfcp_reqlist_isempty - Check whether the request list is empty
56 * @rl: pointer to reqlist
57 *
58 * Returns: 1 if list is empty, 0 if not
59 */
60static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
61{
62 unsigned int i;
63
64 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
65 if (!list_empty(&rl->buckets[i]))
66 return 0;
67 return 1;
68}
69
70/**
71 * zfcp_reqlist_free - Free allocated memory for reqlist
72 * @rl: The reqlist to free
73 */
74static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
75{
76 /* sanity check */
77 BUG_ON(!zfcp_reqlist_isempty(rl));
78
79 kfree(rl);
80}
81
82static inline struct zfcp_fsf_req *
83_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
84{
85 struct zfcp_fsf_req *req;
86 unsigned int i;
87
88 i = zfcp_reqlist_hash(req_id);
89 list_for_each_entry(req, &rl->buckets[i], list)
90 if (req->req_id == req_id)
91 return req;
92 return NULL;
93}
94
95/**
96 * zfcp_reqlist_find - Lookup FSF request by its request id
97 * @rl: The reqlist in which to look up the FSF request
98 * @req_id: The request id to look for
99 *
100 * Returns a pointer to the FSF request with the specified request id
101 * or NULL if there is no known FSF request with this id.
102 */
103static inline struct zfcp_fsf_req *
104zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
105{
106 unsigned long flags;
107 struct zfcp_fsf_req *req;
108
109 spin_lock_irqsave(&rl->lock, flags);
110 req = _zfcp_reqlist_find(rl, req_id);
111 spin_unlock_irqrestore(&rl->lock, flags);
112
113 return req;
114}
115
116/**
117 * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
118 * @rl: The reqlist in which to search for and remove the entry
119 * @req_id: The request id of the request to look for
120 *
121 * This function tries to find the FSF request with the specified
122 * id and then removes it from the reqlist. The reqlist lock is held
123 * during both steps of the operation.
124 *
125 * Returns: Pointer to the FSF request if the request has been found,
126 * NULL if it has not been found.
127 */
128static inline struct zfcp_fsf_req *
129zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
130{
131 unsigned long flags;
132 struct zfcp_fsf_req *req;
133
134 spin_lock_irqsave(&rl->lock, flags);
135 req = _zfcp_reqlist_find(rl, req_id);
136 if (req)
137 list_del(&req->list);
138 spin_unlock_irqrestore(&rl->lock, flags);
139
140 return req;
141}
142
143/**
144 * zfcp_reqlist_add - Add entry to reqlist
145 * @rl: reqlist where to add the entry
146 * @req: The entry to add
147 *
148 * The request id always increases. As an optimization new requests
149 * are added here with list_add_tail at the end of the bucket lists
150 * while old requests are looked up starting at the beginning of the
151 * lists.
152 */
153static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
154 struct zfcp_fsf_req *req)
155{
156 unsigned int i;
157 unsigned long flags;
158
159 i = zfcp_reqlist_hash(req->req_id);
160
161 spin_lock_irqsave(&rl->lock, flags);
162 list_add_tail(&req->list, &rl->buckets[i]);
163 spin_unlock_irqrestore(&rl->lock, flags);
164}
165
166/**
167 * zfcp_reqlist_move - Move all entries from reqlist to simple list
168 * @rl: The zfcp_reqlist from which to remove all entries
169 * @list: The list to move all entries to
170 */
171static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
172 struct list_head *list)
173{
174 unsigned int i;
175 unsigned long flags;
176
177 spin_lock_irqsave(&rl->lock, flags);
178 for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
179 list_splice_init(&rl->buckets[i], list);
180 spin_unlock_irqrestore(&rl->lock, flags);
181}
182
183#endif /* ZFCP_REQLIST_H */
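A compact usage sketch of this API; the caller is hypothetical, and struct zfcp_fsf_req (with its req_id and list members) is assumed from zfcp_def.h. The point of zfcp_reqlist_find_rm() is that lookup and unhash happen under a single hold of the reqlist lock, which is exactly what the reqid check in zfcp_fsf.c above relies on:

#include <linux/errno.h>
#include "zfcp_reqlist.h"

/* Sketch: lifecycle of one tracked request. */
static int example_track(struct zfcp_fsf_req *req)
{
        struct zfcp_reqlist *rl = zfcp_reqlist_alloc();

        if (!rl)
                return -ENOMEM;

        zfcp_reqlist_add(rl, req);      /* submission path */

        /* response path: lookup and unhash in one locked step */
        req = zfcp_reqlist_find_rm(rl, req->req_id);

        zfcp_reqlist_free(rl);          /* would BUG if entries remained */
        return req ? 0 : -EINVAL;
}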
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 0e1a34627a2e..174b6d57d576 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,35 +3,41 @@
3 * 3 *
4 * Interface to Linux SCSI midlayer. 4 * Interface to Linux SCSI midlayer.
5 * 5 *
6 * Copyright IBM Corporation 2002, 2009 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/types.h>
13#include <linux/slab.h>
14#include <scsi/fc/fc_fcp.h>
12#include <asm/atomic.h> 15#include <asm/atomic.h>
13#include "zfcp_ext.h" 16#include "zfcp_ext.h"
14#include "zfcp_dbf.h" 17#include "zfcp_dbf.h"
18#include "zfcp_fc.h"
19#include "zfcp_reqlist.h"
15 20
16static unsigned int default_depth = 32; 21static unsigned int default_depth = 32;
17module_param_named(queue_depth, default_depth, uint, 0600); 22module_param_named(queue_depth, default_depth, uint, 0600);
18MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); 23MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
19 24
20/* Find start of Sense Information in FCP response unit*/ 25static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
21char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) 26 int reason)
22{ 27{
23 char *fcp_sns_info_ptr; 28 switch (reason) {
24 29 case SCSI_QDEPTH_DEFAULT:
25 fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1]; 30 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
26 if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid) 31 break;
27 fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len; 32 case SCSI_QDEPTH_QFULL:
28 33 scsi_track_queue_full(sdev, depth);
29 return fcp_sns_info_ptr; 34 break;
30} 35 case SCSI_QDEPTH_RAMP_UP:
31 36 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
32static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth) 37 break;
33{ 38 default:
34 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 39 return -EOPNOTSUPP;
40 }
35 return sdev->queue_depth; 41 return sdev->queue_depth;
36} 42}
37 43
@@ -39,7 +45,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
39{ 45{
40 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 46 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
41 unit->device = NULL; 47 unit->device = NULL;
42 zfcp_unit_put(unit); 48 put_device(&unit->dev);
43} 49}
44 50
45static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 51static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -55,10 +61,9 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
55{ 61{
56 struct zfcp_adapter *adapter = 62 struct zfcp_adapter *adapter =
57 (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 63 (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
64
58 set_host_byte(scpnt, result); 65 set_host_byte(scpnt, result);
59 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 66 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
60 zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
61 /* return directly */
62 scpnt->scsi_done(scpnt); 67 scpnt->scsi_done(scpnt);
63} 68}
64 69
@@ -82,29 +87,35 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
82 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 87 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
83 unit = scpnt->device->hostdata; 88 unit = scpnt->device->hostdata;
84 89
85 BUG_ON(!adapter || (adapter != unit->port->adapter));
86 BUG_ON(!scpnt->scsi_done);
87
88 if (unlikely(!unit)) {
89 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
90 return 0;
91 }
92
93 scsi_result = fc_remote_port_chkready(rport); 90 scsi_result = fc_remote_port_chkready(rport);
94 if (unlikely(scsi_result)) { 91 if (unlikely(scsi_result)) {
95 scpnt->result = scsi_result; 92 scpnt->result = scsi_result;
96 zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); 93 zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
97 scpnt->scsi_done(scpnt); 94 scpnt->scsi_done(scpnt);
98 return 0; 95 return 0;
99 } 96 }
100 97
101 status = atomic_read(&unit->status); 98 status = atomic_read(&unit->status);
102 if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) || 99 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
103 !(status & ZFCP_STATUS_COMMON_RUNNING))) { 100 !(atomic_read(&unit->port->status) &
101 ZFCP_STATUS_COMMON_ERP_FAILED)) {
102 /* only unit access denied, but port is good
103 * not covered by FC transport, have to fail here */
104 zfcp_scsi_command_fail(scpnt, DID_ERROR); 104 zfcp_scsi_command_fail(scpnt, DID_ERROR);
105 return 0; 105 return 0;
106 } 106 }
107 107
108 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
109 /* This could be either
110 * open unit pending: this is temporary, will result in
111 * open unit or ERP_FAILED, so retry command
112 * call to rport_delete pending: mimic retry from
113 * fc_remote_port_chkready until rport is BLOCKED
114 */
115 zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
116 return 0;
117 }
118
108 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); 119 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
109 if (unlikely(ret == -EBUSY)) 120 if (unlikely(ret == -EBUSY))
110 return SCSI_MLQUEUE_DEVICE_BUSY; 121 return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -115,49 +126,44 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
115} 126}
116 127
117static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, 128static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
118 int channel, unsigned int id, 129 unsigned int id, u64 lun)
119 unsigned int lun)
120{ 130{
131 unsigned long flags;
121 struct zfcp_port *port; 132 struct zfcp_port *port;
122 struct zfcp_unit *unit; 133 struct zfcp_unit *unit = NULL;
123 int scsi_lun;
124 134
125 list_for_each_entry(port, &adapter->port_list_head, list) { 135 read_lock_irqsave(&adapter->port_list_lock, flags);
136 list_for_each_entry(port, &adapter->port_list, list) {
126 if (!port->rport || (id != port->rport->scsi_target_id)) 137 if (!port->rport || (id != port->rport->scsi_target_id))
127 continue; 138 continue;
128 list_for_each_entry(unit, &port->unit_list_head, list) { 139 unit = zfcp_get_unit_by_lun(port, lun);
129 scsi_lun = scsilun_to_int( 140 if (unit)
130 (struct scsi_lun *)&unit->fcp_lun); 141 break;
131 if (lun == scsi_lun)
132 return unit;
133 }
134 } 142 }
143 read_unlock_irqrestore(&adapter->port_list_lock, flags);
135 144
136 return NULL; 145 return unit;
137} 146}
138 147
139static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) 148static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
140{ 149{
141 struct zfcp_adapter *adapter; 150 struct zfcp_adapter *adapter;
142 struct zfcp_unit *unit; 151 struct zfcp_unit *unit;
143 unsigned long flags; 152 u64 lun;
144 int retval = -ENXIO;
145 153
146 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; 154 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
147 if (!adapter) 155 if (!adapter)
148 goto out; 156 goto out;
149 157
150 read_lock_irqsave(&zfcp_data.config_lock, flags); 158 int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun);
151 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); 159 unit = zfcp_unit_lookup(adapter, sdp->id, lun);
152 if (unit) { 160 if (unit) {
153 sdp->hostdata = unit; 161 sdp->hostdata = unit;
154 unit->device = sdp; 162 unit->device = sdp;
155 zfcp_unit_get(unit); 163 return 0;
156 retval = 0;
157 } 164 }
158 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
159out: 165out:
160 return retval; 166 return -ENXIO;
161} 167}
162 168
163static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 169static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@@ -176,9 +182,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
176 /* avoid race condition between late normal completion and abort */ 182 /* avoid race condition between late normal completion and abort */
177 write_lock_irqsave(&adapter->abort_lock, flags); 183 write_lock_irqsave(&adapter->abort_lock, flags);
178 184
179 spin_lock(&adapter->req_list_lock); 185 old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
180 old_req = zfcp_reqlist_find(adapter, old_reqid);
181 spin_unlock(&adapter->req_list_lock);
182 if (!old_req) { 186 if (!old_req) {
183 write_unlock_irqrestore(&adapter->abort_lock, flags); 187 write_unlock_irqrestore(&adapter->abort_lock, flags);
184 zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, 188 zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
@@ -196,6 +200,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
196 break; 200 break;
197 201
198 zfcp_erp_wait(adapter); 202 zfcp_erp_wait(adapter);
203 fc_block_scsi_eh(scpnt);
199 if (!(atomic_read(&adapter->status) & 204 if (!(atomic_read(&adapter->status) &
200 ZFCP_STATUS_COMMON_RUNNING)) { 205 ZFCP_STATUS_COMMON_RUNNING)) {
201 zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, 206 zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -235,6 +240,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
235 break; 240 break;
236 241
237 zfcp_erp_wait(adapter); 242 zfcp_erp_wait(adapter);
243 fc_block_scsi_eh(scpnt);
238 if (!(atomic_read(&adapter->status) & 244 if (!(atomic_read(&adapter->status) &
239 ZFCP_STATUS_COMMON_RUNNING)) { 245 ZFCP_STATUS_COMMON_RUNNING)) {
240 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); 246 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -249,9 +255,6 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
249 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 255 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
250 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); 256 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
251 retval = FAILED; 257 retval = FAILED;
252 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
253 zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt);
254 retval = FAILED;
255 } else 258 } else
256 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); 259 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
257 260
@@ -261,12 +264,12 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
261 264
262static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 265static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
263{ 266{
264 return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET); 267 return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
265} 268}
266 269
267static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) 270static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
268{ 271{
269 return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET); 272 return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
270} 273}
271 274
272static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 275static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
@@ -276,6 +279,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
276 279
277 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); 280 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
278 zfcp_erp_wait(adapter); 281 zfcp_erp_wait(adapter);
282 fc_block_scsi_eh(scpnt);
279 283
280 return SUCCESS; 284 return SUCCESS;
281} 285}
@@ -303,7 +307,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
303 adapter->scsi_host->max_lun = 1; 307 adapter->scsi_host->max_lun = 1;
304 adapter->scsi_host->max_channel = 0; 308 adapter->scsi_host->max_channel = 0;
305 adapter->scsi_host->unique_id = dev_id.devno; 309 adapter->scsi_host->unique_id = dev_id.devno;
306 adapter->scsi_host->max_cmd_len = 255; 310 adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
307 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; 311 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
308 312
309 adapter->scsi_host->hostdata[0] = (unsigned long) adapter; 313 adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -325,12 +329,11 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
325 if (!shost) 329 if (!shost)
326 return; 330 return;
327 331
328 read_lock_irq(&zfcp_data.config_lock); 332 read_lock_irq(&adapter->port_list_lock);
329 list_for_each_entry(port, &adapter->port_list_head, list) 333 list_for_each_entry(port, &adapter->port_list, list)
330 if (port->rport) 334 port->rport = NULL;
331 port->rport = NULL; 335 read_unlock_irq(&adapter->port_list_lock);
332 336
333 read_unlock_irq(&zfcp_data.config_lock);
334 fc_remove_host(shost); 337 fc_remove_host(shost);
335 scsi_remove_host(shost); 338 scsi_remove_host(shost);
336 scsi_host_put(shost); 339 scsi_host_put(shost);
@@ -348,7 +351,7 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
348 fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL); 351 fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
349 if (!fc_stats) 352 if (!fc_stats)
350 return NULL; 353 return NULL;
351 adapter->fc_stats = fc_stats; /* freed in adater_dequeue */ 354 adapter->fc_stats = fc_stats; /* freed in adapter_release */
352 } 355 }
353 memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats)); 356 memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
354 return adapter->fc_stats; 357 return adapter->fc_stats;
@@ -464,7 +467,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
464 adapter->stats_reset = jiffies/HZ; 467 adapter->stats_reset = jiffies/HZ;
465 kfree(adapter->stats_reset_data); 468 kfree(adapter->stats_reset_data);
466 adapter->stats_reset_data = data; /* finally freed in 469 adapter->stats_reset_data = data; /* finally freed in
467 adapter_dequeue */ 470 adapter_release */
468 } 471 }
469} 472}
470 473
@@ -495,7 +498,7 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
495 * @rport: The FC rport on which to terminate I/O 498 * @rport: The FC rport on which to terminate I/O
496 * 499 *
497 * Abort all pending SCSI commands for a port by closing the 500 * Abort all pending SCSI commands for a port by closing the
498 * port. Using a reopen for avoids a conflict with a shutdown 501 * port. Using a reopen avoids a conflict with a shutdown
499 * overwriting a reopen. 502 * overwriting a reopen.
500 */ 503 */
501static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) 504static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
@@ -505,15 +508,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
505 struct zfcp_adapter *adapter = 508 struct zfcp_adapter *adapter =
506 (struct zfcp_adapter *)shost->hostdata[0]; 509 (struct zfcp_adapter *)shost->hostdata[0];
507 510
508 write_lock_irq(&zfcp_data.config_lock);
509 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 511 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
510 if (port)
511 zfcp_port_get(port);
512 write_unlock_irq(&zfcp_data.config_lock);
513 512
514 if (port) { 513 if (port) {
515 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); 514 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
516 zfcp_port_put(port); 515 put_device(&port->dev);
517 } 516 }
518} 517}
519 518
@@ -555,31 +554,34 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
555 554
556void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) 555void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
557{ 556{
558 zfcp_port_get(port); 557 get_device(&port->dev);
559 port->rport_task = RPORT_ADD; 558 port->rport_task = RPORT_ADD;
560 559
561 if (!queue_work(port->adapter->work_queue, &port->rport_work)) 560 if (!queue_work(port->adapter->work_queue, &port->rport_work))
562 zfcp_port_put(port); 561 put_device(&port->dev);
563} 562}
564 563
565void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) 564void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
566{ 565{
567 zfcp_port_get(port); 566 get_device(&port->dev);
568 port->rport_task = RPORT_DEL; 567 port->rport_task = RPORT_DEL;
569 568
570 if (port->rport && queue_work(port->adapter->work_queue, 569 if (port->rport && queue_work(port->adapter->work_queue,
571 &port->rport_work)) 570 &port->rport_work))
572 return; 571 return;
573 572
574 zfcp_port_put(port); 573 put_device(&port->dev);
575} 574}
576 575
577void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) 576void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
578{ 577{
578 unsigned long flags;
579 struct zfcp_port *port; 579 struct zfcp_port *port;
580 580
581 list_for_each_entry(port, &adapter->port_list_head, list) 581 read_lock_irqsave(&adapter->port_list_lock, flags);
582 list_for_each_entry(port, &adapter->port_list, list)
582 zfcp_scsi_schedule_rport_block(port); 583 zfcp_scsi_schedule_rport_block(port);
584 read_unlock_irqrestore(&adapter->port_list_lock, flags);
583} 585}
584 586
585void zfcp_scsi_rport_work(struct work_struct *work) 587void zfcp_scsi_rport_work(struct work_struct *work)
@@ -597,7 +599,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
597 } 599 }
598 } 600 }
599 601
600 zfcp_port_put(port); 602 put_device(&port->dev);
601} 603}
602 604
603 605
@@ -615,21 +617,7 @@ void zfcp_scsi_scan(struct work_struct *work)
615 scsilun_to_int((struct scsi_lun *) 617 scsilun_to_int((struct scsi_lun *)
616 &unit->fcp_lun), 0); 618 &unit->fcp_lun), 0);
617 619
618 zfcp_unit_put(unit); 620 put_device(&unit->dev);
619}
620
621static int zfcp_execute_fc_job(struct fc_bsg_job *job)
622{
623 switch (job->request->msgcode) {
624 case FC_BSG_RPT_ELS:
625 case FC_BSG_HST_ELS_NOLOGIN:
626 return zfcp_fc_execute_els_fc_job(job);
627 case FC_BSG_RPT_CT:
628 case FC_BSG_HST_CT:
629 return zfcp_fc_execute_ct_fc_job(job);
630 default:
631 return -EINVAL;
632 }
633} 621}
634 622
635struct fc_function_template zfcp_transport_functions = { 623struct fc_function_template zfcp_transport_functions = {
@@ -643,6 +631,7 @@ struct fc_function_template zfcp_transport_functions = {
643 .show_host_port_name = 1, 631 .show_host_port_name = 1,
644 .show_host_permanent_port_name = 1, 632 .show_host_permanent_port_name = 1,
645 .show_host_supported_classes = 1, 633 .show_host_supported_classes = 1,
634 .show_host_supported_fc4s = 1,
646 .show_host_supported_speeds = 1, 635 .show_host_supported_speeds = 1,
647 .show_host_maxframe_size = 1, 636 .show_host_maxframe_size = 1,
648 .show_host_serial_number = 1, 637 .show_host_serial_number = 1,
@@ -652,13 +641,16 @@ struct fc_function_template zfcp_transport_functions = {
652 .get_host_port_state = zfcp_get_host_port_state, 641 .get_host_port_state = zfcp_get_host_port_state,
653 .terminate_rport_io = zfcp_scsi_terminate_rport_io, 642 .terminate_rport_io = zfcp_scsi_terminate_rport_io,
654 .show_host_port_state = 1, 643 .show_host_port_state = 1,
655 .bsg_request = zfcp_execute_fc_job, 644 .show_host_active_fc4s = 1,
645 .bsg_request = zfcp_fc_exec_bsg_job,
646 .bsg_timeout = zfcp_fc_timeout_bsg_job,
656 /* no functions registered for following dynamic attributes but 647 /* no functions registered for following dynamic attributes but
657 directly set by LLDD */ 648 directly set by LLDD */
658 .show_host_port_type = 1, 649 .show_host_port_type = 1,
659 .show_host_speed = 1, 650 .show_host_speed = 1,
660 .show_host_port_id = 1, 651 .show_host_port_id = 1,
661 .disable_target_scan = 1, 652 .disable_target_scan = 1,
653 .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
662}; 654};
663 655
664struct zfcp_data zfcp_data = { 656struct zfcp_data zfcp_data = {
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index d31000886ca8..f5f60698dc4c 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,12 +3,13 @@
3 * 3 *
4 * sysfs attributes. 4 * sysfs attributes.
5 * 5 *
6 * Copyright IBM Corporation 2008 6 * Copyright IBM Corporation 2008, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/slab.h>
12#include "zfcp_ext.h" 13#include "zfcp_ext.h"
13 14
14#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ 15#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -19,30 +20,43 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
19 struct device_attribute *at,\ 20 struct device_attribute *at,\
20 char *buf) \ 21 char *buf) \
21{ \ 22{ \
22 struct _feat_def *_feat = dev_get_drvdata(dev); \ 23 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
23 \ 24 \
24 return sprintf(buf, _format, _value); \ 25 return sprintf(buf, _format, _value); \
25} \ 26} \
26static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
27 zfcp_sysfs_##_feat##_##_name##_show, NULL); 28 zfcp_sysfs_##_feat##_##_name##_show, NULL);
28 29
29ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", 30#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
30 atomic_read(&adapter->status)); 31static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
31ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n", 32 struct device_attribute *at,\
32 (unsigned long long) adapter->peer_wwnn); 33 char *buf) \
33ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n", 34{ \
34 (unsigned long long) adapter->peer_wwpn); 35 struct ccw_device *cdev = to_ccwdev(dev); \
35ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n", 36 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
36 adapter->peer_d_id); 37 int i; \
37ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n", 38 \
38 adapter->hydra_version); 39 if (!adapter) \
39ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n", 40 return -ENODEV; \
40 adapter->fsf_lic_version); 41 \
41ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n", 42 i = sprintf(buf, _format, _value); \
42 adapter->hardware_version); 43 zfcp_ccw_adapter_put(adapter); \
43ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n", 44 return i; \
44 (atomic_read(&adapter->status) & 45} \
45 ZFCP_STATUS_COMMON_ERP_INUSE) != 0); 46static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
47 zfcp_sysfs_adapter_##_name##_show, NULL);
48
49ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
50ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
51 (unsigned long long) adapter->peer_wwnn);
52ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
53 (unsigned long long) adapter->peer_wwpn);
54ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
55ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
56ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
57ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
58ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
59 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
46 60
47ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n", 61ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
48 atomic_read(&port->status)); 62 atomic_read(&port->status));
@@ -73,7 +87,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
73 struct device_attribute *attr, \ 87 struct device_attribute *attr, \
74 char *buf) \ 88 char *buf) \
75{ \ 89{ \
76 struct _feat_def *_feat = dev_get_drvdata(dev); \ 90 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
77 \ 91 \
78 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ 92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
79 return sprintf(buf, "1\n"); \ 93 return sprintf(buf, "1\n"); \
@@ -84,15 +98,12 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
84 struct device_attribute *attr,\ 98 struct device_attribute *attr,\
85 const char *buf, size_t count)\ 99 const char *buf, size_t count)\
86{ \ 100{ \
87 struct _feat_def *_feat = dev_get_drvdata(dev); \ 101 struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
88 unsigned long val; \ 102 unsigned long val; \
89 int retval = 0; \ 103 int retval = 0; \
90 \ 104 \
91 mutex_lock(&zfcp_data.config_mutex); \ 105 if (!(_feat && get_device(&_feat->dev))) \
92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ 106 return -EBUSY; \
93 retval = -EBUSY; \
94 goto out; \
95 } \
96 \ 107 \
97 if (strict_strtoul(buf, 0, &val) || val != 0) { \ 108 if (strict_strtoul(buf, 0, &val) || val != 0) { \
98 retval = -EINVAL; \ 109 retval = -EINVAL; \
@@ -105,29 +116,82 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
105 _reopen_id, NULL); \ 116 _reopen_id, NULL); \
106 zfcp_erp_wait(_adapter); \ 117 zfcp_erp_wait(_adapter); \
107out: \ 118out: \
108 mutex_unlock(&zfcp_data.config_mutex); \ 119 put_device(&_feat->dev); \
109 return retval ? retval : (ssize_t) count; \ 120 return retval ? retval : (ssize_t) count; \
110} \ 121} \
111static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ 122static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
112 zfcp_sysfs_##_feat##_failed_show, \ 123 zfcp_sysfs_##_feat##_failed_show, \
113 zfcp_sysfs_##_feat##_failed_store); 124 zfcp_sysfs_##_feat##_failed_store);
114 125
115ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2");
116ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); 126ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
117ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); 127ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
118 128
129static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
130 struct device_attribute *attr,
131 char *buf)
132{
133 struct ccw_device *cdev = to_ccwdev(dev);
134 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
135 int i;
136
137 if (!adapter)
138 return -ENODEV;
139
140 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
141 i = sprintf(buf, "1\n");
142 else
143 i = sprintf(buf, "0\n");
144
145 zfcp_ccw_adapter_put(adapter);
146 return i;
147}
148
149static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
150 struct device_attribute *attr,
151 const char *buf, size_t count)
152{
153 struct ccw_device *cdev = to_ccwdev(dev);
154 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
155 unsigned long val;
156 int retval = 0;
157
158 if (!adapter)
159 return -ENODEV;
160
161 if (strict_strtoul(buf, 0, &val) || val != 0) {
162 retval = -EINVAL;
163 goto out;
164 }
165
166 zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL,
167 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
168 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
169 "syafai2", NULL);
170 zfcp_erp_wait(adapter);
171out:
172 zfcp_ccw_adapter_put(adapter);
173 return retval ? retval : (ssize_t) count;
174}
175static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
176 zfcp_sysfs_adapter_failed_show,
177 zfcp_sysfs_adapter_failed_store);
178
119static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, 179static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
120 struct device_attribute *attr, 180 struct device_attribute *attr,
121 const char *buf, size_t count) 181 const char *buf, size_t count)
122{ 182{
123 struct zfcp_adapter *adapter = dev_get_drvdata(dev); 183 struct ccw_device *cdev = to_ccwdev(dev);
124 int ret; 184 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
125 185
126 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) 186 if (!adapter)
127 return -EBUSY; 187 return -ENODEV;
128 188
129 ret = zfcp_fc_scan_ports(adapter); 189 /* sync the user-space with the kernel invocation of scan_work */
130 return ret ? ret : (ssize_t) count; 190 queue_work(adapter->work_queue, &adapter->scan_work);
191 flush_work(&adapter->scan_work);
192 zfcp_ccw_adapter_put(adapter);
193
194 return (ssize_t) count;
131} 195}
132static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, 196static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
133 zfcp_sysfs_port_rescan_store); 197 zfcp_sysfs_port_rescan_store);
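port_rescan no longer calls the FC port scan directly; it queues the
adapter's single scan_work item and then flushes it, so the write still
blocks until a scan has completed while user- and kernel-triggered scans
are serialized through one work item. The pattern in isolation, with a
hypothetical work item rather than zfcp code:

#include <linux/workqueue.h>

static void scan_fn(struct work_struct *work)
{
	/* ... discover remote ports ... */
}
static DECLARE_WORK(scan_work, scan_fn);

static void trigger_scan_and_wait(struct workqueue_struct *wq)
{
	/* queue_work() is a no-op if scan_work is already pending, in
	 * which case we simply wait on the run that is about to happen;
	 * flush_work() returns only after scan_fn has finished. */
	queue_work(wq, &scan_work);
	flush_work(&scan_work);
}

Funneling both paths through one work item also means a user-triggered
rescan cannot race a kernel-triggered one.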
@@ -136,44 +200,34 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
 {
-	struct zfcp_adapter *adapter = dev_get_drvdata(dev);
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 	struct zfcp_port *port;
 	u64 wwpn;
-	int retval = 0;
-	LIST_HEAD(port_remove_lh);
+	int retval = -EINVAL;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
-		retval = -EBUSY;
-		goto out;
-	}
+	if (!adapter)
+		return -ENODEV;
 
-	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
-		retval = -EINVAL;
+	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
 		goto out;
-	}
 
-	write_lock_irq(&zfcp_data.config_lock);
 	port = zfcp_get_port_by_wwpn(adapter, wwpn);
-	if (port && (atomic_read(&port->refcount) == 0)) {
-		zfcp_port_get(port);
-		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
-		list_move(&port->list, &port_remove_lh);
-	} else
-		port = NULL;
-	write_unlock_irq(&zfcp_data.config_lock);
-
-	if (!port) {
-		retval = -ENXIO;
+	if (!port)
 		goto out;
-	}
+	else
+		retval = 0;
+
+	write_lock_irq(&adapter->port_list_lock);
+	list_del(&port->list);
+	write_unlock_irq(&adapter->port_list_lock);
+
+	put_device(&port->dev);
 
 	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
-	zfcp_erp_wait(adapter);
-	zfcp_port_put(port);
-	zfcp_port_dequeue(port);
+	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
  out:
-	mutex_unlock(&zfcp_data.config_mutex);
+	zfcp_ccw_adapter_put(adapter);
 	return retval ? retval : (ssize_t) count;
 }
 static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
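After the rewrite, port_remove unlinks the port under the new per-adapter
rwlock, drops the list's reference with put_device(), and unregisters the
device; the object itself is freed only from its release callback once
the last reference is gone. A sketch of what a helper like
zfcp_device_unregister() presumably pairs together (an assumption; the
real helper lives elsewhere in the driver):

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch: detach the attribute group, then unregister. device_unregister()
 * does device_del() plus the final put_device(), after which the
 * device's ->release() callback may free the containing object. */
static void sketch_device_unregister(struct device *dev,
				     const struct attribute_group *grp)
{
	sysfs_remove_group(&dev->kobj, grp);
	device_unregister(dev);
}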
@@ -202,16 +256,13 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf, size_t count)
 {
-	struct zfcp_port *port = dev_get_drvdata(dev);
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 	struct zfcp_unit *unit;
 	u64 fcp_lun;
 	int retval = -EINVAL;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
-		retval = -EBUSY;
-		goto out;
-	}
+	if (!(port && get_device(&port->dev)))
+		return -EBUSY;
 
 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		goto out;
@@ -219,15 +270,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 	unit = zfcp_unit_enqueue(port, fcp_lun);
 	if (IS_ERR(unit))
 		goto out;
-
-	retval = 0;
+	else
+		retval = 0;
 
 	zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
 	zfcp_erp_wait(unit->port->adapter);
 	flush_work(&unit->scsi_work);
-	zfcp_unit_put(unit);
 out:
-	mutex_unlock(&zfcp_data.config_mutex);
+	put_device(&port->dev);
 	return retval ? retval : (ssize_t) count;
 }
 static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
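unit_add relies on zfcp_unit_enqueue() reporting failure through the
kernel's ERR_PTR convention, which lets the handler keep a single error
path without a separate status out-parameter. The convention in
miniature, with a hypothetical allocator rather than zfcp code:

#include <linux/err.h>
#include <linux/slab.h>

struct bar { int x; };

/* Encode an errno in the returned pointer instead of returning NULL. */
static struct bar *bar_create(void)
{
	struct bar *bar = kzalloc(sizeof(*bar), GFP_KERNEL);

	if (!bar)
		return ERR_PTR(-ENOMEM);
	return bar;
}

static int bar_user(void)
{
	struct bar *bar = bar_create();

	if (IS_ERR(bar))
		return PTR_ERR(bar);	/* decode the errno again */
	kfree(bar);
	return 0;
}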
@@ -236,54 +286,36 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
 {
-	struct zfcp_port *port = dev_get_drvdata(dev);
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 	struct zfcp_unit *unit;
 	u64 fcp_lun;
-	int retval = 0;
-	LIST_HEAD(unit_remove_lh);
+	int retval = -EINVAL;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
-		retval = -EBUSY;
-		goto out;
-	}
+	if (!(port && get_device(&port->dev)))
+		return -EBUSY;
 
-	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
-		retval = -EINVAL;
+	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		goto out;
-	}
 
-	write_lock_irq(&zfcp_data.config_lock);
 	unit = zfcp_get_unit_by_lun(port, fcp_lun);
-	if (unit) {
-		write_unlock_irq(&zfcp_data.config_lock);
-		/* wait for possible timeout during SCSI probe */
-		flush_work(&unit->scsi_work);
-		write_lock_irq(&zfcp_data.config_lock);
-
-		if (atomic_read(&unit->refcount) == 0) {
-			zfcp_unit_get(unit);
-			atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
-					&unit->status);
-			list_move(&unit->list, &unit_remove_lh);
-		} else {
-			unit = NULL;
-		}
-	}
+	if (!unit)
+		goto out;
+	else
+		retval = 0;
 
-	write_unlock_irq(&zfcp_data.config_lock);
+	/* wait for possible timeout during SCSI probe */
+	flush_work(&unit->scsi_work);
 
-	if (!unit) {
-		retval = -ENXIO;
-		goto out;
-	}
+	write_lock_irq(&port->unit_list_lock);
+	list_del(&unit->list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	put_device(&unit->dev);
 
 	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
-	zfcp_erp_wait(unit->port->adapter);
-	zfcp_unit_put(unit);
-	zfcp_unit_dequeue(unit);
+	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
 out:
-	mutex_unlock(&zfcp_data.config_mutex);
+	put_device(&port->dev);
 	return retval ? retval : (ssize_t) count;
 }
 static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
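unit_remove expects zfcp_get_unit_by_lun() to return its result with an
extra device reference held, which the handler then consumes via
list_del()/put_device(). A hedged sketch of such a lookup under the new
per-port rwlock; it assumes struct zfcp_port gains a unit_list list head
guarded by the unit_list_lock seen above and that struct zfcp_unit keeps
its fcp_lun member, so the real function may differ in detail:

static struct zfcp_unit *sketch_get_unit_by_lun(struct zfcp_port *port,
						u64 fcp_lun)
{
	struct zfcp_unit *unit;

	read_lock_irq(&port->unit_list_lock);
	list_for_each_entry(unit, &port->unit_list, list)
		if (unit->fcp_lun == fcp_lun) {
			get_device(&unit->dev);	/* caller must put_device() */
			read_unlock_irq(&port->unit_list_lock);
			return unit;
		}
	read_unlock_irq(&port->unit_list_lock);
	return NULL;
}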