Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c | 18
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 15
-rw-r--r--  drivers/s390/block/dasd_diag.c | 25
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 16
-rw-r--r--  drivers/s390/block/dasd_eer.c | 6
-rw-r--r--  drivers/s390/block/dasd_fba.c | 19
-rw-r--r--  drivers/s390/block/dcssblk.c | 22
-rw-r--r--  drivers/s390/block/xpram.c | 18
-rw-r--r--  drivers/s390/char/con3215.c | 38
-rw-r--r--  drivers/s390/char/con3270.c | 6
-rw-r--r--  drivers/s390/char/fs3270.c | 34
-rw-r--r--  drivers/s390/char/monreader.c | 78
-rw-r--r--  drivers/s390/char/monwriter.c | 3
-rw-r--r--  drivers/s390/char/raw3270.c | 28
-rw-r--r--  drivers/s390/char/sclp.c | 12
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 343
-rw-r--r--  drivers/s390/char/sclp_con.c | 5
-rw-r--r--  drivers/s390/char/sclp_config.c | 17
-rw-r--r--  drivers/s390/char/sclp_cpi_sys.c | 57
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 8
-rw-r--r--  drivers/s390/char/sclp_rw.c | 2
-rw-r--r--  drivers/s390/char/sclp_sdias.c | 4
-rw-r--r--  drivers/s390/char/sclp_tty.c | 261
-rw-r--r--  drivers/s390/char/sclp_tty.h | 53
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 62
-rw-r--r--  drivers/s390/char/tape_34xx.c | 12
-rw-r--r--  drivers/s390/char/tape_3590.c | 21
-rw-r--r--  drivers/s390/char/tape_char.c | 12
-rw-r--r--  drivers/s390/char/tape_core.c | 15
-rw-r--r--  drivers/s390/char/tty3270.c | 9
-rw-r--r--  drivers/s390/char/vmcp.c | 41
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 37
-rw-r--r--  drivers/s390/char/vmur.c | 27
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 23
-rw-r--r--  drivers/s390/char/zcore.c | 132
-rw-r--r--  drivers/s390/cio/Makefile | 6
-rw-r--r--  drivers/s390/cio/airq.c | 45
-rw-r--r--  drivers/s390/cio/chp.c | 116
-rw-r--r--  drivers/s390/cio/chp.h | 15
-rw-r--r--  drivers/s390/cio/chsc.c | 388
-rw-r--r--  drivers/s390/cio/chsc.h | 28
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 820
-rw-r--r--  drivers/s390/cio/chsc_sch.h | 13
-rw-r--r--  drivers/s390/cio/cio.c | 282
-rw-r--r--  drivers/s390/cio/cio.h | 14
-rw-r--r--  drivers/s390/cio/cmf.c | 20
-rw-r--r--  drivers/s390/cio/css.c | 283
-rw-r--r--  drivers/s390/cio/css.h | 49
-rw-r--r--  drivers/s390/cio/device.c | 476
-rw-r--r--  drivers/s390/cio/device.h | 7
-rw-r--r--  drivers/s390/cio/device_fsm.c | 210
-rw-r--r--  drivers/s390/cio/device_id.c | 16
-rw-r--r--  drivers/s390/cio/device_ops.c | 134
-rw-r--r--  drivers/s390/cio/device_pgid.c | 26
-rw-r--r--  drivers/s390/cio/device_status.c | 133
-rw-r--r--  drivers/s390/cio/fcx.c | 350
-rw-r--r--  drivers/s390/cio/idset.h | 2
-rw-r--r--  drivers/s390/cio/io_sch.h | 48
-rw-r--r--  drivers/s390/cio/ioasm.h | 2
-rw-r--r--  drivers/s390/cio/isc.c | 68
-rw-r--r--  drivers/s390/cio/itcw.c | 327
-rw-r--r--  drivers/s390/cio/qdio.c | 3934
-rw-r--r--  drivers/s390/cio/qdio.h | 837
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 240
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 91
-rw-r--r--  drivers/s390/cio/qdio_main.c | 1755
-rw-r--r--  drivers/s390/cio/qdio_perf.c | 151
-rw-r--r--  drivers/s390/cio/qdio_perf.h | 54
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 521
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 380
-rw-r--r--  drivers/s390/cio/schid.h | 26
-rw-r--r--  drivers/s390/cio/scsw.c | 843
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 63
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 27
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 28
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c | 3
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 15
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 69
-rw-r--r--  drivers/s390/net/claw.c | 77
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 12
-rw-r--r--  drivers/s390/net/ctcm_main.c | 28
-rw-r--r--  drivers/s390/net/cu3088.c | 2
-rw-r--r--  drivers/s390/net/cu3088.h | 3
-rw-r--r--  drivers/s390/net/lcs.c | 44
-rw-r--r--  drivers/s390/net/netiucv.c | 61
-rw-r--r--  drivers/s390/net/qeth_core.h | 12
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 102
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 26
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 34
-rw-r--r--  drivers/s390/net/smsgiucv.c | 10
-rw-r--r--  drivers/s390/s390mach.c | 106
-rw-r--r--  drivers/s390/s390mach.h | 10
-rw-r--r--  drivers/s390/scsi/Makefile | 3
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 1689
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 152
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 259
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 102
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 14
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 341
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 3824
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 306
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 567
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 5573
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 70
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 799
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 784
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 496
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_adapter.c | 270
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_driver.c | 106
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_port.c | 295
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_unit.c | 167
114 files changed, 13672 insertions, 17078 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1a4025683362..1b6c52ef7339 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	now = get_clock();

 	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
-		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
-		  (unsigned int) intparm);
+		  cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
+		  irb->scsw.cmd.dstat), (unsigned int) intparm);

 	/* check for unsolicited interrupts */
 	cqr = (struct dasd_ccw_req *) intparm;
-	if (!cqr || ((irb->scsw.cc == 1) &&
-		     (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
-		     (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
+	if (!cqr || ((irb->scsw.cmd.cc == 1) &&
+		     (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+		     (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
 		if (cqr && cqr->status == DASD_CQR_IN_IO)
 			cqr->status = DASD_CQR_QUEUED;
 		device = dasd_device_from_cdev_locked(cdev);
@@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,

 	/* Check for clear pending */
 	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
-	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
+	    irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
 		cqr->status = DASD_CQR_CLEARED;
 		dasd_device_clear_timer(device);
 		wake_up(&dasd_flush_wq);
@@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		return;
 	}
 	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
-		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
+		      ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
 	next = NULL;
 	expires = 0;
-	if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
-	    irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
+	if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	    irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
 		/* request was completed successfully */
 		cqr->status = DASD_CQR_SUCCESS;
 		cqr->stopclk = now;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e6700df52df4..5c6e6f331cb0 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)

 	/* determine the address of the CCW to be restarted */
 	/* Imprecise ending is not set -> addr from IRB-SCSW */
-	cpa = default_erp->refers->irb.scsw.cpa;
+	cpa = default_erp->refers->irb.scsw.cmd.cpa;

 	if (cpa == 0) {

@@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)

 	/* determine the address of the CCW to be restarted */
 	/* Imprecise ending is not set -> addr from IRB-SCSW */
-	cpa = previous_erp->irb.scsw.cpa;
+	cpa = previous_erp->irb.scsw.cmd.cpa;

 	if (cpa == 0) {

@@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
 {
 	struct dasd_device *device = erp->startdev;

-	if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK
+	if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK
 					   | SCHN_STAT_CHN_CTRL_CHK)) {
 		DEV_MESSAGE(KERN_DEBUG, device, "%s",
 			    "channel or interface control check");
@@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)

 	if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
 	    (cqr2->irb.esw.esw0.erw.cons == 0)) {
-		if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
+		if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
 					     SCHN_STAT_CHN_CTRL_CHK)) ==
-		    (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
+		    (cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
 					     SCHN_STAT_CHN_CTRL_CHK)))
 			return 1; /* match with ifcc*/
 	}
@@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 	}

 	/* double-check if current erp/cqr was successfull */
-	if ((cqr->irb.scsw.cstat == 0x00) &&
-	    (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
+	if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
+	    (cqr->irb.scsw.cmd.dstat ==
+	     (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {

 		DEV_MESSAGE(KERN_DEBUG, device,
 			    "ERP called for successful request %p"
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index d91df38ee4f7..85fcb4371054 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -333,7 +333,8 @@ dasd_diag_check_device(struct dasd_device *device)
 	if (IS_ERR(block)) {
 		DEV_MESSAGE(KERN_WARNING, device, "%s",
 			    "could not allocate dasd block structure");
-		kfree(device->private);
+		device->private = NULL;
+		kfree(private);
 		return PTR_ERR(block);
 	}
 	device->block = block;
@@ -348,7 +349,8 @@ dasd_diag_check_device(struct dasd_device *device)
 	if (rc) {
 		DEV_MESSAGE(KERN_WARNING, device, "failed to retrieve device "
 			    "information (rc=%d)", rc);
-		return -ENOTSUPP;
+		rc = -EOPNOTSUPP;
+		goto out;
 	}

 	/* Figure out position of label block */
@@ -362,7 +364,8 @@ dasd_diag_check_device(struct dasd_device *device)
 	default:
 		DEV_MESSAGE(KERN_WARNING, device, "unsupported device class "
 			    "(class=%d)", private->rdc_data.vdev_class);
-		return -ENOTSUPP;
+		rc = -EOPNOTSUPP;
+		goto out;
 	}

 	DBF_DEV_EVENT(DBF_INFO, device,
@@ -379,7 +382,8 @@ dasd_diag_check_device(struct dasd_device *device)
 	if (label == NULL) {
 		DEV_MESSAGE(KERN_WARNING, device, "%s",
 			    "No memory to allocate initialization request");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out;
 	}
 	rc = 0;
 	end_block = 0;
@@ -403,7 +407,7 @@ dasd_diag_check_device(struct dasd_device *device)
 			DEV_MESSAGE(KERN_WARNING, device, "%s",
 				    "DIAG call failed");
 			rc = -EOPNOTSUPP;
-			goto out;
+			goto out_label;
 		}
 		mdsk_term_io(device);
 		if (rc == 0)
@@ -413,7 +417,7 @@ dasd_diag_check_device(struct dasd_device *device)
 		DEV_MESSAGE(KERN_WARNING, device, "device access failed "
 			    "(rc=%d)", rc);
 		rc = -EIO;
-		goto out;
+		goto out_label;
 	}
 	/* check for label block */
 	if (memcmp(label->label_id, DASD_DIAG_CMS1,
@@ -439,8 +443,15 @@ dasd_diag_check_device(struct dasd_device *device)
 			    (unsigned long) (block->blocks <<
 				block->s2b_shift) >> 1);
 	}
-out:
+out_label:
 	free_page((long) label);
+out:
+	if (rc) {
+		device->block = NULL;
+		dasd_free_block(block);
+		device->private = NULL;
+		kfree(private);
+	}
 	return rc;
 }

diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a0edae091b5e..3590fdb5b2fd 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1404,21 +1404,24 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,

 	/* first of all check for state change pending interrupt */
 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
-	if ((irb->scsw.dstat & mask) == mask) {
+	if ((irb->scsw.cmd.dstat & mask) == mask) {
 		dasd_generic_handle_state_change(device);
 		return;
 	}

 	/* summary unit check */
-	if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) {
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+	    (irb->ecw[7] == 0x0D)) {
 		dasd_alias_handle_summary_unit_check(device, irb);
 		return;
 	}


 	/* service information message SIM */
-	if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) {
+	if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) &&
+	    ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
 		dasd_3990_erp_handle_sim(device, irb->ecw);
+		dasd_schedule_device_bh(device);
 		return;
 	}

@@ -2068,11 +2071,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
 		       device->cdev->dev.bus_id);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
-		       irb->scsw.cstat, irb->scsw.dstat);
+		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " device %s: Failing CCW: %p\n",
 		       device->cdev->dev.bus_id,
-		       (void *) (addr_t) irb->scsw.cpa);
+		       (void *) (addr_t) irb->scsw.cmd.cpa);
 	if (irb->esw.esw0.erw.cons) {
 		for (sl = 0; sl < 4; sl++) {
 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -2122,7 +2125,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
 		/* scsw->cda is either valid or zero */
 		len = 0;
 		from = ++to;
-		fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
+		fail = (struct ccw1 *)(addr_t)
+		       irb->scsw.cmd.cpa; /* failing CCW */
 		if (from < fail - 2) {
 			from = fail - 2; /* there is a gap - print header */
 			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 6e53ab606e97..29da4413ad43 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -15,6 +15,7 @@
 #include <linux/device.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
+#include <linux/smp_lock.h>

 #include <asm/uaccess.h>
 #include <asm/atomic.h>
@@ -525,6 +526,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
 	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
 	if (!eerb)
 		return -ENOMEM;
+	lock_kernel();
 	eerb->buffer_page_count = eer_pages;
 	if (eerb->buffer_page_count < 1 ||
 	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
@@ -532,6 +534,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
532 MESSAGE(KERN_WARNING, "can't open device since module " 534 MESSAGE(KERN_WARNING, "can't open device since module "
533 "parameter eer_pages is smaller then 1 or" 535 "parameter eer_pages is smaller then 1 or"
534 " bigger then %d", (int)(INT_MAX / PAGE_SIZE)); 536 " bigger then %d", (int)(INT_MAX / PAGE_SIZE));
537 unlock_kernel();
535 return -EINVAL; 538 return -EINVAL;
536 } 539 }
537 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; 540 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
@@ -539,12 +542,14 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
 			       GFP_KERNEL);
 	if (!eerb->buffer) {
 		kfree(eerb);
+		unlock_kernel();
 		return -ENOMEM;
 	}
 	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
 					   eerb->buffer_page_count)) {
 		kfree(eerb->buffer);
 		kfree(eerb);
+		unlock_kernel();
 		return -ENOMEM;
 	}
 	filp->private_data = eerb;
@@ -552,6 +557,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
 	list_add(&eerb->list, &bufferlist);
 	spin_unlock_irqrestore(&bufferlock, flags);

+	unlock_kernel();
 	return nonseekable_open(inp,filp);
 }

diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 116611583df8..aa0c533423a5 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -139,7 +139,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 	if (IS_ERR(block)) {
 		DEV_MESSAGE(KERN_WARNING, device, "%s",
 			    "could not allocate dasd block structure");
-		kfree(device->private);
+		device->private = NULL;
+		kfree(private);
 		return PTR_ERR(block);
 	}
 	device->block = block;
@@ -152,6 +153,10 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 		DEV_MESSAGE(KERN_WARNING, device,
 			    "Read device characteristics returned error %d",
 			    rc);
+		device->block = NULL;
+		dasd_free_block(block);
+		device->private = NULL;
+		kfree(private);
 		return rc;
 	}

@@ -222,7 +227,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,

 	/* first of all check for state change pending interrupt */
 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
-	if ((irb->scsw.dstat & mask) == mask) {
+	if ((irb->scsw.cmd.dstat & mask) == mask) {
 		dasd_generic_handle_state_change(device);
 		return;
 	}
@@ -449,11 +454,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
 		       device->cdev->dev.bus_id);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
-		       irb->scsw.cstat, irb->scsw.dstat);
+		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " device %s: Failing CCW: %p\n",
 		       device->cdev->dev.bus_id,
-		       (void *) (addr_t) irb->scsw.cpa);
+		       (void *) (addr_t) irb->scsw.cmd.cpa);
 	if (irb->esw.esw0.erw.cons) {
 		for (sl = 0; sl < 4; sl++) {
 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -498,11 +503,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,

 	/* print failing CCW area */
 	len = 0;
-	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
-		act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
+	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
+		act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
 	}
-	end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
+	end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
 	while (act <= end) {
 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 			       " CCW %p: %08X %08X DAT:",
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index bb52d2fbac18..01fcdd91b846 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 	struct dcssblk_dev_info *dev_info;
 	int rc;

-	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
-		PRINT_WARN("Invalid value, must be 0 or 1\n");
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
 		return -EINVAL;
-	}
 	down_write(&dcssblk_devices_sem);
 	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
 	if (atomic_read(&dev_info->use_count)) {
@@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 			set_disk_ro(dev_info->gd, 0);
 		}
 	} else {
-		PRINT_WARN("Invalid value, must be 0 or 1\n");
 		rc = -EINVAL;
 		goto out;
 	}
@@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 {
 	struct dcssblk_dev_info *dev_info;

-	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
-		PRINT_WARN("Invalid value, must be 0 or 1\n");
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
 		return -EINVAL;
-	}
 	dev_info = container_of(dev, struct dcssblk_dev_info, dev);

 	down_write(&dcssblk_devices_sem);
@@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 		}
 	} else {
 		up_write(&dcssblk_devices_sem);
-		PRINT_WARN("Invalid value, must be 0 or 1\n");
 		return -EINVAL;
 	}
 	up_write(&dcssblk_devices_sem);
@@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	goto out;

 unregister_dev:
-	PRINT_ERR("device_create_file() failed!\n");
 	list_del(&dev_info->lh);
 	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
@@ -702,10 +695,8 @@ dcssblk_check_params(void)
 static void __exit
 dcssblk_exit(void)
 {
-	PRINT_DEBUG("DCSSBLOCK EXIT...\n");
 	s390_root_dev_unregister(dcssblk_root_dev);
 	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
-	PRINT_DEBUG("...finished!\n");
 }

 static int __init
@@ -713,27 +704,21 @@ dcssblk_init(void)
 {
 	int rc;

-	PRINT_DEBUG("DCSSBLOCK INIT...\n");
 	dcssblk_root_dev = s390_root_dev_register("dcssblk");
-	if (IS_ERR(dcssblk_root_dev)) {
-		PRINT_ERR("device_register() failed!\n");
+	if (IS_ERR(dcssblk_root_dev))
 		return PTR_ERR(dcssblk_root_dev);
-	}
 	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
 	if (rc) {
-		PRINT_ERR("device_create_file(add) failed!\n");
 		s390_root_dev_unregister(dcssblk_root_dev);
 		return rc;
 	}
 	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
 	if (rc) {
-		PRINT_ERR("device_create_file(remove) failed!\n");
 		s390_root_dev_unregister(dcssblk_root_dev);
 		return rc;
 	}
 	rc = register_blkdev(0, DCSSBLK_NAME);
 	if (rc < 0) {
-		PRINT_ERR("Can't get dynamic major!\n");
 		s390_root_dev_unregister(dcssblk_root_dev);
 		return rc;
 	}
@@ -742,7 +727,6 @@ dcssblk_init(void)

 	dcssblk_check_params();

-	PRINT_DEBUG("...finished!\n");
 	return 0;
 }

diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index f231bc21b1ca..dd9b986389a2 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
 		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
 	if (cc == 3)
 		return -ENXIO;
-	if (cc == 2) {
-		PRINT_ERR("expanded storage lost!\n");
+	if (cc == 2)
 		return -ENXIO;
-	}
-	if (cc == 1) {
-		PRINT_ERR("page in failed for page index %u.\n",
-			  xpage_index);
+	if (cc == 1)
 		return -EIO;
-	}
 	return 0;
 }

@@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
 		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
 	if (cc == 3)
 		return -ENXIO;
-	if (cc == 2) {
-		PRINT_ERR("expanded storage lost!\n");
+	if (cc == 2)
 		return -ENXIO;
-	}
-	if (cc == 1) {
-		PRINT_ERR("page out failed for page index %u.\n",
-			  xpage_index);
+	if (cc == 1)
 		return -EIO;
-	}
 	return 0;
 }

diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 3e5653c92f4b..d3ec9b55ab35 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -93,9 +93,6 @@ struct raw3215_info {
 	struct raw3215_req *queued_write;/* pointer to queued write requests */
 	wait_queue_head_t empty_wait;	/* wait queue for flushing */
 	struct timer_list timer;	/* timer for delayed output */
-	char *message;			/* pending message from raw3215_irq */
-	int msg_dstat;			/* dstat for pending message */
-	int msg_cstat;			/* cstat for pending message */
 	int line_pos;			/* position on the line (for tabs) */
 	char ubuffer[80];		/* copy_from_user buffer */
 };
@@ -359,11 +356,6 @@ raw3215_tasklet(void *data)
 		raw3215_mk_write_req(raw);
 	raw3215_try_io(raw);
 	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
-	/* Check for pending message from raw3215_irq */
-	if (raw->message != NULL) {
-		printk(raw->message, raw->msg_dstat, raw->msg_cstat);
-		raw->message = NULL;
-	}
 	tty = raw->tty;
 	if (tty != NULL &&
 	    RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
@@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 	struct raw3215_req *req;
 	struct tty_struct *tty;
 	int cstat, dstat;
-	int count, slen;
+	int count;

 	raw = cdev->dev.driver_data;
 	req = (struct raw3215_req *) intparm;
-	cstat = irb->scsw.cstat;
-	dstat = irb->scsw.dstat;
-	if (cstat != 0) {
-		raw->message = KERN_WARNING
-			"Got nonzero channel status in raw3215_irq "
-			"(dev sts 0x%2x, sch sts 0x%2x)";
-		raw->msg_dstat = dstat;
-		raw->msg_cstat = cstat;
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	if (cstat != 0)
 		tasklet_schedule(&raw->tasklet);
-	}
 	if (dstat & 0x01) { /* we got a unit exception */
 		dstat &= ~0x01;	 /* we can ignore it */
 	}
@@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			break;
 		/* Attention interrupt, someone hit the enter key */
 		raw3215_mk_read_req(raw);
-		if (MACHINE_IS_P390)
-			memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
 		tasklet_schedule(&raw->tasklet);
 		break;
 	case 0x08:
@@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			return; /* That shouldn't happen ... */
 		if (req->type == RAW3215_READ) {
 			/* store residual count, then wait for device end */
-			req->residual = irb->scsw.count;
+			req->residual = irb->scsw.cmd.count;
 		}
 		if (dstat == 0x08)
 			break;
@@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)

 			tty = raw->tty;
 			count = 160 - req->residual;
-			if (MACHINE_IS_P390) {
-				slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
-				if (count > slen)
-					count = slen;
-			} else
 			EBCASC(raw->inbuf, count);
 			cchar = ctrlchar_handle(raw->inbuf, count, tty);
 			switch (cchar & CTRLCHAR_MASK) {
@@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			raw->flags &= ~RAW3215_WORKING;
 			raw3215_free_req(req);
 		}
-		raw->message = KERN_WARNING
-			"Spurious interrupt in in raw3215_irq "
-			"(dev sts 0x%2x, sch sts 0x%2x)";
-		raw->msg_dstat = dstat;
-		raw->msg_cstat = cstat;
 		tasklet_schedule(&raw->tasklet);
 	}
 	return;
@@ -883,7 +857,6 @@ con3215_init(void)
 		free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
 		free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
 		raw3215[0] = NULL;
-		printk("Couldn't find a 3215 console device\n");
 		return -ENODEV;
 	}
 	register_console(&con3215);
@@ -1157,7 +1130,6 @@ tty3215_init(void)
 	tty_set_operations(driver, &tty3215_ops);
 	ret = tty_register_driver(driver);
 	if (ret) {
-		printk("Couldn't register tty3215 driver\n");
 		put_tty_driver(driver);
 		return ret;
 	}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 0b040557db02..3c07974886ed 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -411,15 +411,15 @@ static int
 con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
 {
 	/* Handle ATTN. Schedule tasklet to read aid. */
-	if (irb->scsw.dstat & DEV_STAT_ATTENTION)
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
 		con3270_issue_read(cp);

 	if (rq) {
-		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
 			rq->rc = -EIO;
 		else
 			/* Normal end. Copy residual count. */
-			rq->rescnt = irb->scsw.count;
+			rq->rescnt = irb->scsw.cmd.count;
 	}
 	return RAW3270_IO_DONE;
 }
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index ef36f2132aa4..d18e6d2e0b49 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/types.h>
+#include <linux/smp_lock.h>

 #include <asm/ccwdev.h>
 #include <asm/cio.h>
@@ -216,17 +217,17 @@ static int
 fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
 {
 	/* Handle ATTN. Set indication and wake waiters for attention. */
-	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
 		fp->attention = 1;
 		wake_up(&fp->wait);
 	}

 	if (rq) {
-		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
 			rq->rc = -EIO;
 		else
 			/* Normal end. Copy residual count. */
-			rq->rescnt = irb->scsw.count;
+			rq->rescnt = irb->scsw.cmd.count;
 	}
 	return RAW3270_IO_DONE;
 }
@@ -421,6 +422,7 @@ fs3270_open(struct inode *inode, struct file *filp)

 	if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
 		return -ENODEV;
+	lock_kernel();
 	minor = iminor(filp->f_path.dentry->d_inode);
 	/* Check for minor 0 multiplexer. */
 	if (minor == 0) {
@@ -429,7 +431,8 @@ fs3270_open(struct inode *inode, struct file *filp)
 		tty = get_current_tty();
 		if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
 			mutex_unlock(&tty_mutex);
-			return -ENODEV;
+			rc = -ENODEV;
+			goto out;
 		}
 		minor = tty->index + RAW3270_FIRSTMINOR;
 		mutex_unlock(&tty_mutex);
@@ -438,19 +441,22 @@ fs3270_open(struct inode *inode, struct file *filp)
 	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
 	if (!IS_ERR(fp)) {
 		raw3270_put_view(&fp->view);
-		return -EBUSY;
+		rc = -EBUSY;
+		goto out;
 	}
 	/* Allocate fullscreen view structure. */
 	fp = fs3270_alloc_view();
-	if (IS_ERR(fp))
-		return PTR_ERR(fp);
+	if (IS_ERR(fp)) {
+		rc = PTR_ERR(fp);
+		goto out;
+	}

 	init_waitqueue_head(&fp->wait);
 	fp->fs_pid = get_pid(task_pid(current));
 	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
 	if (rc) {
 		fs3270_free_view(&fp->view);
-		return rc;
+		goto out;
 	}

 	/* Allocate idal-buffer. */
@@ -458,7 +464,8 @@ fs3270_open(struct inode *inode, struct file *filp)
 	if (IS_ERR(ib)) {
 		raw3270_put_view(&fp->view);
 		raw3270_del_view(&fp->view);
-		return PTR_ERR(fp);
+		rc = PTR_ERR(fp);
+		goto out;
 	}
 	fp->rdbuf = ib;

@@ -466,9 +473,11 @@ fs3270_open(struct inode *inode, struct file *filp)
 	if (rc) {
 		raw3270_put_view(&fp->view);
 		raw3270_del_view(&fp->view);
-		return rc;
+		goto out;
 	}
 	filp->private_data = fp;
+out:
+	unlock_kernel();
 	return 0;
 }

@@ -512,11 +521,8 @@ fs3270_init(void)
 	int rc;

 	rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
-	if (rc) {
-		printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
-		       IBM_FS3270_MAJOR, rc);
+	if (rc)
 		return rc;
-	}
 	return 0;
 }

diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 1e1f50655bbf..35fd8dfcaaa6 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -3,14 +3,14 @@
  *
  * Character device driver for reading z/VM *MONITOR service records.
  *
- * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
- *
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Copyright IBM Corp. 2004, 2008
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */

 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -18,12 +18,11 @@
 #include <linux/ctype.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <net/iucv/iucv.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
 #include <asm/extmem.h>
-#include <linux/poll.h>
-#include <net/iucv/iucv.h>
-

 //#define MON_DEBUG			/* Debug messages on/off */

@@ -152,10 +151,7 @@ static int mon_check_mca(struct mon_msg *monmsg)
 	    (mon_mca_end(monmsg) > mon_dcss_end) ||
 	    (mon_mca_start(monmsg) < mon_dcss_start) ||
 	    ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
-	{
-		P_DEBUG("READ, IGNORED INVALID MCA\n\n");
 		return -EINVAL;
-	}
 	return 0;
 }

161 157
@@ -164,10 +160,6 @@ static int mon_send_reply(struct mon_msg *monmsg,
164{ 160{
165 int rc; 161 int rc;
166 162
167 P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
168 "0x%08X\n\n",
169 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
170
171 rc = iucv_message_reply(monpriv->path, &monmsg->msg, 163 rc = iucv_message_reply(monpriv->path, &monmsg->msg,
172 IUCV_IPRMDATA, NULL, 0); 164 IUCV_IPRMDATA, NULL, 0);
173 atomic_dec(&monpriv->msglim_count); 165 atomic_dec(&monpriv->msglim_count);
@@ -202,15 +194,12 @@ static struct mon_private *mon_alloc_mem(void)
 	struct mon_private *monpriv;

 	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
-	if (!monpriv) {
-		P_ERROR("no memory for monpriv\n");
+	if (!monpriv)
 		return NULL;
-	}
 	for (i = 0; i < MON_MSGLIM; i++) {
 		monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
 						GFP_KERNEL);
 		if (!monpriv->msg_array[i]) {
-			P_ERROR("open, no memory for msg_array\n");
 			mon_free_mem(monpriv);
 			return NULL;
 		}
@@ -218,41 +207,10 @@ static struct mon_private *mon_alloc_mem(void)
 	return monpriv;
 }

-static inline void mon_read_debug(struct mon_msg *monmsg,
-				  struct mon_private *monpriv)
-{
-#ifdef MON_DEBUG
-	u8 msg_type[2], mca_type;
-	unsigned long records_len;
-
-	records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
-
-	memcpy(msg_type, &monmsg->msg.class, 2);
-	EBCASC(msg_type, 2);
-	mca_type = mon_mca_type(monmsg, 0);
-	EBCASC(&mca_type, 1);
-
-	P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
-		monpriv->read_index, monpriv->write_index);
-	P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
-		monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
-	P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
-		msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
-		mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
-	P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
-		mon_mca_start(monmsg), mon_mca_end(monmsg));
-	P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
-		mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
-	if (mon_mca_size(monmsg) > 12)
-		P_DEBUG("READ, MORE THAN ONE MCA\n\n");
-#endif
-}
-
 static inline void mon_next_mca(struct mon_msg *monmsg)
 {
 	if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
 		return;
-	P_DEBUG("READ, NEXT MCA\n\n");
 	monmsg->mca_offset += 12;
 	monmsg->pos = 0;
 }
@@ -269,7 +227,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv)
 		monmsg->msglim_reached = 0;
 		monmsg->pos = 0;
 		monmsg->mca_offset = 0;
-		P_WARNING("read, message limit reached\n");
 		monpriv->read_index = (monpriv->read_index + 1) %
 				      MON_MSGLIM;
 		atomic_dec(&monpriv->read_ready);
@@ -286,10 +243,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
 {
 	struct mon_private *monpriv = path->private;

-	P_DEBUG("IUCV connection completed\n");
-	P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
-		"0x%02X, Sample = 0x%02X\n",
-		ipuser[0], ipuser[1], ipuser[2]);
 	atomic_set(&monpriv->iucv_connected, 1);
 	wake_up(&mon_conn_wait_queue);
 }
@@ -310,7 +263,6 @@ static void mon_iucv_message_pending(struct iucv_path *path,
 {
 	struct mon_private *monpriv = path->private;

-	P_DEBUG("IUCV message pending\n");
 	memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
 	       msg, sizeof(*msg));
 	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
@@ -340,6 +292,7 @@ static int mon_open(struct inode *inode, struct file *filp)
 	/*
 	 * only one user allowed
 	 */
+	lock_kernel();
 	rc = -EBUSY;
 	if (test_and_set_bit(MON_IN_USE, &mon_in_use))
 		goto out;
@@ -375,8 +328,8 @@ static int mon_open(struct inode *inode, struct file *filp)
 		rc = -EIO;
 		goto out_path;
 	}
-	P_INFO("open, established connection to *MONITOR service\n\n");
 	filp->private_data = monpriv;
+	unlock_kernel();
 	return nonseekable_open(inode, filp);

 out_path:
@@ -386,6 +339,7 @@ out_priv:
 out_use:
 	clear_bit(MON_IN_USE, &mon_in_use);
 out:
+	unlock_kernel();
 	return rc;
 }

@@ -400,8 +354,6 @@ static int mon_close(struct inode *inode, struct file *filp)
 	rc = iucv_path_sever(monpriv->path, user_data_sever);
 	if (rc)
 		P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
-	else
-		P_INFO("close, terminated connection to *MONITOR service\n");

 	atomic_set(&monpriv->iucv_severed, 0);
 	atomic_set(&monpriv->iucv_connected, 0);
@@ -442,10 +394,8 @@ static ssize_t mon_read(struct file *filp, char __user *data,
 		monmsg = monpriv->msg_array[monpriv->read_index];
 	}

-	if (!monmsg->pos) {
+	if (!monmsg->pos)
 		monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
-		mon_read_debug(monmsg, monpriv);
-	}
 	if (mon_check_mca(monmsg))
 		goto reply;

@@ -531,7 +481,6 @@ static int __init mon_init(void)
 		P_ERROR("failed to register with iucv driver\n");
 		return rc;
 	}
-	P_INFO("open, registered with IUCV\n");

 	rc = segment_type(mon_dcss_name);
 	if (rc < 0) {
@@ -555,13 +504,8 @@ static int __init mon_init(void)
 	dcss_mkname(mon_dcss_name, &user_data_connect[8]);

 	rc = misc_register(&mon_dev);
-	if (rc < 0 ) {
-		P_ERROR("misc_register failed, rc = %i\n", rc);
+	if (rc < 0 )
 		goto out;
-	}
-	P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
-	       mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
-	       mon_dcss_end - mon_dcss_start + 1);
 	return 0;

 out:
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index a86c0534cd49..4d71aa8c1a79 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -12,6 +12,7 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/errno.h>
+#include <linux/smp_lock.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -179,10 +180,12 @@ static int monwrite_open(struct inode *inode, struct file *filp)
 	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
 	if (!monpriv)
 		return -ENOMEM;
+	lock_kernel();
 	INIT_LIST_HEAD(&monpriv->list);
 	monpriv->hdr_to_read = sizeof(monpriv->hdr);
 	mutex_init(&monpriv->thread_mutex);
 	filp->private_data = monpriv;
+	unlock_kernel();
 	return nonseekable_open(inode, filp);
 }

diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 848ef7e8523f..81a96e019080 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
 	struct raw3270_request *rq;

 	rq = alloc_bootmem_low(sizeof(struct raw3270));
-	if (!rq)
-		return ERR_PTR(-ENOMEM);
-	memset(rq, 0, sizeof(struct raw3270_request));

 	/* alloc output buffer. */
-	if (size > 0) {
+	if (size > 0)
 		rq->buffer = alloc_bootmem_low(size);
-		if (!rq->buffer) {
-			free_bootmem((unsigned long) rq,
-				     sizeof(struct raw3270));
-			return ERR_PTR(-ENOMEM);
-		}
-	}
 	rq->size = size;
 	INIT_LIST_HEAD(&rq->list);

@@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)

 	if (IS_ERR(irb))
 		rc = RAW3270_IO_RETRY;
-	else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
+	else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
 		rq->rc = -EIO;
 		rc = RAW3270_IO_DONE;
-	} else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
-				       DEV_STAT_UNIT_EXCEP)) {
+	} else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
+					   DEV_STAT_UNIT_EXCEP)) {
 		/* Handle CE-DE-UE and subsequent UDE */
 		set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
 		rc = RAW3270_IO_BUSY;
 	} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
 		/* Wait for UDE if busy flag is set. */
-		if (irb->scsw.dstat & DEV_STAT_DEV_END) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
 			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
 			/* Got it, now retry. */
 			rc = RAW3270_IO_RETRY;
@@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
 	 * Unit-Check Processing:
 	 * Expect Command Reject or Intervention Required.
 	 */
-	if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 		/* Request finished abnormally. */
 		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
 			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
@@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
 		}
 	}
 	if (rq) {
-		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 			if (irb->ecw[0] & SNS0_CMD_REJECT)
 				rq->rc = -EOPNOTSUPP;
 			else
 				rq->rc = -EIO;
 		} else
 			/* Request finished normally. Copy residual count. */
-			rq->rescnt = irb->scsw.count;
+			rq->rescnt = irb->scsw.cmd.count;
 	}
-	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
 		set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
 		wake_up(&raw3270_wait_queue);
 	}
@@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp)
619 rp->cols = 132; 610 rp->cols = 132;
620 break; 611 break;
621 default: 612 default:
622 printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
623 rc = -EOPNOTSUPP; 613 rc = -EOPNOTSUPP;
624 break; 614 break;
625 } 615 }
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 2c7a1ee6b041..3c8b25e6c345 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
506 if (scbuf->validity_sclp_send_mask) 506 if (scbuf->validity_sclp_send_mask)
507 sclp_send_mask = scbuf->sclp_send_mask; 507 sclp_send_mask = scbuf->sclp_send_mask;
508 spin_unlock_irqrestore(&sclp_lock, flags); 508 spin_unlock_irqrestore(&sclp_lock, flags);
509 if (scbuf->validity_sclp_active_facility_mask)
510 sclp_facilities = scbuf->sclp_active_facility_mask;
509 sclp_dispatch_state_change(); 511 sclp_dispatch_state_change();
510} 512}
511 513
@@ -782,11 +784,9 @@ sclp_check_handler(__u16 code)
782 /* Is this the interrupt we are waiting for? */ 784 /* Is this the interrupt we are waiting for? */
783 if (finished_sccb == 0) 785 if (finished_sccb == 0)
784 return; 786 return;
785 if (finished_sccb != (u32) (addr_t) sclp_init_sccb) { 787 if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
786 printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt " 788 panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
787 "for buffer at 0x%x\n", finished_sccb); 789 finished_sccb);
788 return;
789 }
790 spin_lock(&sclp_lock); 790 spin_lock(&sclp_lock);
791 if (sclp_running_state == sclp_running_state_running) { 791 if (sclp_running_state == sclp_running_state_running) {
792 sclp_init_req.status = SCLP_REQ_DONE; 792 sclp_init_req.status = SCLP_REQ_DONE;
@@ -883,8 +883,6 @@ sclp_init(void)
883 unsigned long flags; 883 unsigned long flags;
884 int rc; 884 int rc;
885 885
886 if (!MACHINE_HAS_SCLP)
887 return -ENODEV;
888 spin_lock_irqsave(&sclp_lock, flags); 886 spin_lock_irqsave(&sclp_lock, flags);
889 /* Check for previous or running initialization */ 887 /* Check for previous or running initialization */
890 if (sclp_init_state != sclp_init_state_uninitialized) { 888 if (sclp_init_state != sclp_init_state_uninitialized) {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index b5c23396f8fe..0c2b77493db4 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -11,6 +11,9 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/mm.h>
15#include <linux/mmzone.h>
16#include <linux/memory.h>
14#include <asm/chpid.h> 17#include <asm/chpid.h>
15#include <asm/sclp.h> 18#include <asm/sclp.h>
16#include "sclp.h" 19#include "sclp.h"
@@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid;
43 46
44u64 sclp_facilities; 47u64 sclp_facilities;
45static u8 sclp_fac84; 48static u8 sclp_fac84;
49static unsigned long long rzm;
50static unsigned long long rnmax;
46 51
47static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) 52static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
48{ 53{
@@ -62,7 +67,7 @@ out:
62 return rc; 67 return rc;
63} 68}
64 69
65void __init sclp_read_info_early(void) 70static void __init sclp_read_info_early(void)
66{ 71{
67 int rc; 72 int rc;
68 int i; 73 int i;
@@ -92,34 +97,33 @@ void __init sclp_read_info_early(void)
92 97
93void __init sclp_facilities_detect(void) 98void __init sclp_facilities_detect(void)
94{ 99{
100 struct read_info_sccb *sccb;
101
102 sclp_read_info_early();
95 if (!early_read_info_sccb_valid) 103 if (!early_read_info_sccb_valid)
96 return; 104 return;
97 sclp_facilities = early_read_info_sccb.facilities; 105
98 sclp_fac84 = early_read_info_sccb.fac84; 106 sccb = &early_read_info_sccb;
107 sclp_facilities = sccb->facilities;
108 sclp_fac84 = sccb->fac84;
109 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
110 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
111 rzm <<= 20;
99} 112}
100 113
101unsigned long long __init sclp_memory_detect(void) 114unsigned long long sclp_get_rnmax(void)
102{ 115{
103 unsigned long long memsize; 116 return rnmax;
104 struct read_info_sccb *sccb; 117}
105 118
106 if (!early_read_info_sccb_valid) 119unsigned long long sclp_get_rzm(void)
107 return 0; 120{
108 sccb = &early_read_info_sccb; 121 return rzm;
109 if (sccb->rnsize)
110 memsize = sccb->rnsize << 20;
111 else
112 memsize = sccb->rnsize2 << 20;
113 if (sccb->rnmax)
114 memsize *= sccb->rnmax;
115 else
116 memsize *= sccb->rnmax2;
117 return memsize;
118} 122}
119 123
120/* 124/*
121 * This function will be called after sclp_memory_detect(), which gets called 125 * This function will be called after sclp_facilities_detect(), which gets
122 * early from early.c code. Therefore the sccb should have valid contents. 126 * called from early.c code. Therefore the sccb should have valid contents.
123 */ 127 */
124void __init sclp_get_ipl_info(struct sclp_ipl_info *info) 128void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
125{ 129{
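With sclp_memory_detect() gone, the rnmax * rzm product moves to the callers of the two new accessors. A standalone model of the arithmetic (the numbers are invented; rnsize/rnmax would really come from the read-info SCCB, falling back to rnsize2/rnmax2 for large values):

#include <stdio.h>

/* Model of SCLP memory sizing: rzm is the storage increment size
 * in bytes (rnsize is in MB, hence the shift by 20), rnmax is the
 * number of increments. Example values only.
 */
int main(void)
{
	unsigned long long rnsize = 256;	/* MB per increment */
	unsigned long long rnmax = 64;		/* number of increments */
	unsigned long long rzm = rnsize << 20;	/* bytes per increment */

	printf("detected memory: %llu bytes\n", rnmax * rzm);
	return 0;
}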
@@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu)
278 return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); 282 return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
279} 283}
280 284
285#ifdef CONFIG_MEMORY_HOTPLUG
286
287static DEFINE_MUTEX(sclp_mem_mutex);
288static LIST_HEAD(sclp_mem_list);
289static u8 sclp_max_storage_id;
290static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
291
292struct memory_increment {
293 struct list_head list;
294 u16 rn;
295 int standby;
296 int usecount;
297};
298
299struct assign_storage_sccb {
300 struct sccb_header header;
301 u16 rn;
302} __packed;
303
304static unsigned long long rn2addr(u16 rn)
305{
306 return (unsigned long long) (rn - 1) * rzm;
307}
308
309static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
310{
311 struct assign_storage_sccb *sccb;
312 int rc;
313
314 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
315 if (!sccb)
316 return -ENOMEM;
317 sccb->header.length = PAGE_SIZE;
318 sccb->rn = rn;
319 rc = do_sync_request(cmd, sccb);
320 if (rc)
321 goto out;
322 switch (sccb->header.response_code) {
323 case 0x0020:
324 case 0x0120:
325 break;
326 default:
327 rc = -EIO;
328 break;
329 }
330out:
331 free_page((unsigned long) sccb);
332 return rc;
333}
334
335static int sclp_assign_storage(u16 rn)
336{
337 return do_assign_storage(0x000d0001, rn);
338}
339
340static int sclp_unassign_storage(u16 rn)
341{
342 return do_assign_storage(0x000c0001, rn);
343}
344
345struct attach_storage_sccb {
346 struct sccb_header header;
347 u16 :16;
348 u16 assigned;
349 u32 :32;
350 u32 entries[0];
351} __packed;
352
353static int sclp_attach_storage(u8 id)
354{
355 struct attach_storage_sccb *sccb;
356 int rc;
357 int i;
358
359 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
360 if (!sccb)
361 return -ENOMEM;
362 sccb->header.length = PAGE_SIZE;
363 rc = do_sync_request(0x00080001 | id << 8, sccb);
364 if (rc)
365 goto out;
366 switch (sccb->header.response_code) {
367 case 0x0020:
368 set_bit(id, sclp_storage_ids);
369 for (i = 0; i < sccb->assigned; i++)
370 sclp_unassign_storage(sccb->entries[i] >> 16);
371 break;
372 default:
373 rc = -EIO;
374 break;
375 }
376out:
377 free_page((unsigned long) sccb);
378 return rc;
379}
380
381static int sclp_mem_change_state(unsigned long start, unsigned long size,
382 int online)
383{
384 struct memory_increment *incr;
385 unsigned long long istart;
386 int rc = 0;
387
388 list_for_each_entry(incr, &sclp_mem_list, list) {
389 istart = rn2addr(incr->rn);
390 if (start + size - 1 < istart)
391 break;
392 if (start > istart + rzm - 1)
393 continue;
394 if (online) {
395 if (incr->usecount++)
396 continue;
397 /*
 398 * Don't break the loop if one assign fails. The loop may
 399 * be walked again on CANCEL and we don't record whether
 400 * an increment's state was already changed.
 401 * So continue and increase usecount for all increments.
402 */
403 rc |= sclp_assign_storage(incr->rn);
404 } else {
405 if (--incr->usecount)
406 continue;
407 sclp_unassign_storage(incr->rn);
408 }
409 }
410 return rc ? -EIO : 0;
411}
412
413static int sclp_mem_notifier(struct notifier_block *nb,
414 unsigned long action, void *data)
415{
416 unsigned long start, size;
417 struct memory_notify *arg;
418 unsigned char id;
419 int rc = 0;
420
421 arg = data;
422 start = arg->start_pfn << PAGE_SHIFT;
423 size = arg->nr_pages << PAGE_SHIFT;
424 mutex_lock(&sclp_mem_mutex);
425 for (id = 0; id <= sclp_max_storage_id; id++)
426 if (!test_bit(id, sclp_storage_ids))
427 sclp_attach_storage(id);
428 switch (action) {
429 case MEM_ONLINE:
430 break;
431 case MEM_GOING_ONLINE:
432 rc = sclp_mem_change_state(start, size, 1);
433 break;
434 case MEM_CANCEL_ONLINE:
435 sclp_mem_change_state(start, size, 0);
436 break;
437 default:
438 rc = -EINVAL;
439 break;
440 }
441 mutex_unlock(&sclp_mem_mutex);
442 return rc ? NOTIFY_BAD : NOTIFY_OK;
443}
444
445static struct notifier_block sclp_mem_nb = {
446 .notifier_call = sclp_mem_notifier,
447};
448
449static void __init add_memory_merged(u16 rn)
450{
451 static u16 first_rn, num;
452 unsigned long long start, size;
453
454 if (rn && first_rn && (first_rn + num == rn)) {
455 num++;
456 return;
457 }
458 if (!first_rn)
459 goto skip_add;
460 start = rn2addr(first_rn);
 461 size = (unsigned long long) num * rzm;
462 if (start >= VMEM_MAX_PHYS)
463 goto skip_add;
464 if (start + size > VMEM_MAX_PHYS)
465 size = VMEM_MAX_PHYS - start;
466 add_memory(0, start, size);
467skip_add:
468 first_rn = rn;
469 num = 1;
470}
471
472static void __init sclp_add_standby_memory(void)
473{
474 struct memory_increment *incr;
475
476 list_for_each_entry(incr, &sclp_mem_list, list)
477 if (incr->standby)
478 add_memory_merged(incr->rn);
479 add_memory_merged(0);
480}
481
482static void __init insert_increment(u16 rn, int standby, int assigned)
483{
484 struct memory_increment *incr, *new_incr;
485 struct list_head *prev;
486 u16 last_rn;
487
488 new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
489 if (!new_incr)
490 return;
491 new_incr->rn = rn;
492 new_incr->standby = standby;
493 last_rn = 0;
494 prev = &sclp_mem_list;
495 list_for_each_entry(incr, &sclp_mem_list, list) {
496 if (assigned && incr->rn > rn)
497 break;
498 if (!assigned && incr->rn - last_rn > 1)
499 break;
500 last_rn = incr->rn;
501 prev = &incr->list;
502 }
503 if (!assigned)
504 new_incr->rn = last_rn + 1;
505 if (new_incr->rn > rnmax) {
506 kfree(new_incr);
507 return;
508 }
509 list_add(&new_incr->list, prev);
510}
511
512struct read_storage_sccb {
513 struct sccb_header header;
514 u16 max_id;
515 u16 assigned;
516 u16 standby;
517 u16 :16;
518 u32 entries[0];
519} __packed;
520
521static int __init sclp_detect_standby_memory(void)
522{
523 struct read_storage_sccb *sccb;
524 int i, id, assigned, rc;
525
526 if (!early_read_info_sccb_valid)
527 return 0;
528 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
529 return 0;
530 rc = -ENOMEM;
531 sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
532 if (!sccb)
533 goto out;
534 assigned = 0;
535 for (id = 0; id <= sclp_max_storage_id; id++) {
536 memset(sccb, 0, PAGE_SIZE);
537 sccb->header.length = PAGE_SIZE;
538 rc = do_sync_request(0x00040001 | id << 8, sccb);
539 if (rc)
540 goto out;
541 switch (sccb->header.response_code) {
542 case 0x0010:
543 set_bit(id, sclp_storage_ids);
544 for (i = 0; i < sccb->assigned; i++) {
545 if (!sccb->entries[i])
546 continue;
547 assigned++;
548 insert_increment(sccb->entries[i] >> 16, 0, 1);
549 }
550 break;
551 case 0x0310:
552 break;
553 case 0x0410:
554 for (i = 0; i < sccb->assigned; i++) {
555 if (!sccb->entries[i])
556 continue;
557 assigned++;
558 insert_increment(sccb->entries[i] >> 16, 1, 1);
559 }
560 break;
561 default:
562 rc = -EIO;
563 break;
564 }
565 if (!rc)
566 sclp_max_storage_id = sccb->max_id;
567 }
568 if (rc || list_empty(&sclp_mem_list))
569 goto out;
570 for (i = 1; i <= rnmax - assigned; i++)
571 insert_increment(0, 1, 0);
572 rc = register_memory_notifier(&sclp_mem_nb);
573 if (rc)
574 goto out;
575 sclp_add_standby_memory();
576out:
577 free_page((unsigned long) sccb);
578 return rc;
579}
580__initcall(sclp_detect_standby_memory);
581
582#endif /* CONFIG_MEMORY_HOTPLUG */
583
281/* 584/*
282 * Channel path configuration related functions. 585 * Channel path configuration related functions.
283 */ 586 */
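Two details of the hotplug block above are easy to model in isolation: increment numbers are 1-based, so rn2addr() maps rn to (rn - 1) * rzm, and add_memory_merged() coalesces runs of consecutive standby increments into a single add_memory() call via static state. A standalone sketch (fixed example values, printf standing in for add_memory()):

#include <stdio.h>

static unsigned long long rzm = 256ULL << 20;	/* example: 256 MB increments */

static unsigned long long rn2addr(unsigned short rn)
{
	return (unsigned long long)(rn - 1) * rzm;	/* rn is 1-based */
}

/* Coalesce consecutive increment numbers; rn == 0 flushes the run. */
static void add_memory_merged(unsigned short rn)
{
	static unsigned short first_rn, num;

	if (rn && first_rn && first_rn + num == rn) {
		num++;
		return;
	}
	if (first_rn)
		printf("add_memory(start=%#llx, size=%#llx)\n",
		       rn2addr(first_rn), (unsigned long long)num * rzm);
	first_rn = rn;
	num = 1;
}

int main(void)
{
	unsigned short standby[] = { 3, 4, 5, 9, 10 };
	unsigned int i;

	for (i = 0; i < sizeof(standby) / sizeof(*standby); i++)
		add_memory_merged(standby[i]);
	add_memory_merged(0);	/* flush the last run: prints two merged ranges */
	return 0;
}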
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index ead1043d788e..7e619c534bf4 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -14,14 +14,13 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/jiffies.h> 15#include <linux/jiffies.h>
16#include <linux/bootmem.h> 16#include <linux/bootmem.h>
17#include <linux/termios.h>
17#include <linux/err.h> 18#include <linux/err.h>
18 19
19#include "sclp.h" 20#include "sclp.h"
20#include "sclp_rw.h" 21#include "sclp_rw.h"
21#include "sclp_tty.h" 22#include "sclp_tty.h"
22 23
23#define SCLP_CON_PRINT_HEADER "sclp console driver: "
24
25#define sclp_console_major 4 /* TTYAUX_MAJOR */ 24#define sclp_console_major 4 /* TTYAUX_MAJOR */
26#define sclp_console_minor 64 25#define sclp_console_minor 64
27#define sclp_console_name "ttyS" 26#define sclp_console_name "ttyS"
@@ -222,8 +221,6 @@ sclp_console_init(void)
222 INIT_LIST_HEAD(&sclp_con_pages); 221 INIT_LIST_HEAD(&sclp_con_pages);
223 for (i = 0; i < MAX_CONSOLE_PAGES; i++) { 222 for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
224 page = alloc_bootmem_low_pages(PAGE_SIZE); 223 page = alloc_bootmem_low_pages(PAGE_SIZE);
225 if (page == NULL)
226 return -ENOMEM;
227 list_add_tail((struct list_head *) page, &sclp_con_pages); 224 list_add_tail((struct list_head *) page, &sclp_con_pages);
228 } 225 }
229 INIT_LIST_HEAD(&sclp_con_outqueue); 226 INIT_LIST_HEAD(&sclp_con_outqueue);
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index ad05a87bc480..fff4ff485d9b 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -8,6 +8,7 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11#include <linux/kthread.h>
11#include <linux/sysdev.h> 12#include <linux/sysdev.h>
12#include <linux/workqueue.h> 13#include <linux/workqueue.h>
13#include <asm/smp.h> 14#include <asm/smp.h>
@@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
40 put_online_cpus(); 41 put_online_cpus();
41} 42}
42 43
43static void __ref sclp_cpu_change_notify(struct work_struct *work) 44static int sclp_cpu_kthread(void *data)
44{ 45{
45 smp_rescan_cpus(); 46 smp_rescan_cpus();
47 return 0;
48}
49
50static void __ref sclp_cpu_change_notify(struct work_struct *work)
51{
52 /* Can't call smp_rescan_cpus() from workqueue context since it may
53 * deadlock in case of cpu hotplug. So we have to create a kernel
54 * thread in order to call it.
55 */
56 kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan");
46} 57}
47 58
48static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) 59static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
@@ -74,10 +85,8 @@ static int __init sclp_conf_init(void)
74 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); 85 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
75 86
76 rc = sclp_register(&sclp_conf_register); 87 rc = sclp_register(&sclp_conf_register);
77 if (rc) { 88 if (rc)
78 printk(KERN_ERR TAG "failed to register (%d).\n", rc);
79 return rc; 89 return rc;
80 }
81 90
82 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { 91 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
83 printk(KERN_WARNING TAG "no configuration management.\n"); 92 printk(KERN_WARNING TAG "no configuration management.\n");
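Handing smp_rescan_cpus() to a one-shot kernel thread is a general escape from the deadlock of running CPU-hotplug work on a workqueue that itself participates in CPU hotplug. The skeleton of the pattern (kernel-style sketch, not a complete module):

#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <asm/smp.h>

static int rescan_kthread(void *data)
{
	smp_rescan_cpus();	/* may block on cpu hotplug locks */
	return 0;
}

static void change_notify(struct work_struct *work)
{
	/* Calling smp_rescan_cpus() directly here could deadlock, so
	 * defer it to a short-lived kernel thread and return at once.
	 */
	kthread_run(rescan_kthread, NULL, "cpu_rescan");
}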
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 9f37456222e9..d887bd261d28 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -27,6 +27,8 @@
27#define CPI_LENGTH_NAME 8 27#define CPI_LENGTH_NAME 8
28#define CPI_LENGTH_LEVEL 16 28#define CPI_LENGTH_LEVEL 16
29 29
30static DEFINE_MUTEX(sclp_cpi_mutex);
31
30struct cpi_evbuf { 32struct cpi_evbuf {
31 struct evbuf_header header; 33 struct evbuf_header header;
32 u8 id_format; 34 u8 id_format;
@@ -124,21 +126,15 @@ static int cpi_req(void)
124 int response; 126 int response;
125 127
126 rc = sclp_register(&sclp_cpi_event); 128 rc = sclp_register(&sclp_cpi_event);
127 if (rc) { 129 if (rc)
128 printk(KERN_WARNING "cpi: could not register "
129 "to hardware console.\n");
130 goto out; 130 goto out;
131 }
132 if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { 131 if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
133 printk(KERN_WARNING "cpi: no control program "
134 "identification support\n");
135 rc = -EOPNOTSUPP; 132 rc = -EOPNOTSUPP;
136 goto out_unregister; 133 goto out_unregister;
137 } 134 }
138 135
139 req = cpi_prepare_req(); 136 req = cpi_prepare_req();
140 if (IS_ERR(req)) { 137 if (IS_ERR(req)) {
141 printk(KERN_WARNING "cpi: could not allocate request\n");
142 rc = PTR_ERR(req); 138 rc = PTR_ERR(req);
143 goto out_unregister; 139 goto out_unregister;
144 } 140 }
@@ -148,10 +144,8 @@ static int cpi_req(void)
148 144
149 /* Add request to sclp queue */ 145 /* Add request to sclp queue */
150 rc = sclp_add_request(req); 146 rc = sclp_add_request(req);
151 if (rc) { 147 if (rc)
152 printk(KERN_WARNING "cpi: could not start request\n");
153 goto out_free_req; 148 goto out_free_req;
154 }
155 149
156 wait_for_completion(&completion); 150 wait_for_completion(&completion);
157 151
@@ -223,7 +217,12 @@ static void set_string(char *attr, const char *value)
223static ssize_t system_name_show(struct kobject *kobj, 217static ssize_t system_name_show(struct kobject *kobj,
224 struct kobj_attribute *attr, char *page) 218 struct kobj_attribute *attr, char *page)
225{ 219{
226 return snprintf(page, PAGE_SIZE, "%s\n", system_name); 220 int rc;
221
222 mutex_lock(&sclp_cpi_mutex);
223 rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
224 mutex_unlock(&sclp_cpi_mutex);
225 return rc;
227} 226}
228 227
229static ssize_t system_name_store(struct kobject *kobj, 228static ssize_t system_name_store(struct kobject *kobj,
@@ -237,7 +236,9 @@ static ssize_t system_name_store(struct kobject *kobj,
237 if (rc) 236 if (rc)
238 return rc; 237 return rc;
239 238
239 mutex_lock(&sclp_cpi_mutex);
240 set_string(system_name, buf); 240 set_string(system_name, buf);
241 mutex_unlock(&sclp_cpi_mutex);
241 242
242 return len; 243 return len;
243} 244}
@@ -248,7 +249,12 @@ static struct kobj_attribute system_name_attr =
248static ssize_t sysplex_name_show(struct kobject *kobj, 249static ssize_t sysplex_name_show(struct kobject *kobj,
249 struct kobj_attribute *attr, char *page) 250 struct kobj_attribute *attr, char *page)
250{ 251{
251 return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); 252 int rc;
253
254 mutex_lock(&sclp_cpi_mutex);
255 rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
256 mutex_unlock(&sclp_cpi_mutex);
257 return rc;
252} 258}
253 259
254static ssize_t sysplex_name_store(struct kobject *kobj, 260static ssize_t sysplex_name_store(struct kobject *kobj,
@@ -262,7 +268,9 @@ static ssize_t sysplex_name_store(struct kobject *kobj,
262 if (rc) 268 if (rc)
263 return rc; 269 return rc;
264 270
271 mutex_lock(&sclp_cpi_mutex);
265 set_string(sysplex_name, buf); 272 set_string(sysplex_name, buf);
273 mutex_unlock(&sclp_cpi_mutex);
266 274
267 return len; 275 return len;
268} 276}
@@ -273,7 +281,12 @@ static struct kobj_attribute sysplex_name_attr =
273static ssize_t system_type_show(struct kobject *kobj, 281static ssize_t system_type_show(struct kobject *kobj,
274 struct kobj_attribute *attr, char *page) 282 struct kobj_attribute *attr, char *page)
275{ 283{
276 return snprintf(page, PAGE_SIZE, "%s\n", system_type); 284 int rc;
285
286 mutex_lock(&sclp_cpi_mutex);
287 rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
288 mutex_unlock(&sclp_cpi_mutex);
289 return rc;
277} 290}
278 291
279static ssize_t system_type_store(struct kobject *kobj, 292static ssize_t system_type_store(struct kobject *kobj,
@@ -287,7 +300,9 @@ static ssize_t system_type_store(struct kobject *kobj,
287 if (rc) 300 if (rc)
288 return rc; 301 return rc;
289 302
303 mutex_lock(&sclp_cpi_mutex);
290 set_string(system_type, buf); 304 set_string(system_type, buf);
305 mutex_unlock(&sclp_cpi_mutex);
291 306
292 return len; 307 return len;
293} 308}
@@ -298,8 +313,11 @@ static struct kobj_attribute system_type_attr =
298static ssize_t system_level_show(struct kobject *kobj, 313static ssize_t system_level_show(struct kobject *kobj,
299 struct kobj_attribute *attr, char *page) 314 struct kobj_attribute *attr, char *page)
300{ 315{
301 unsigned long long level = system_level; 316 unsigned long long level;
302 317
318 mutex_lock(&sclp_cpi_mutex);
319 level = system_level;
320 mutex_unlock(&sclp_cpi_mutex);
303 return snprintf(page, PAGE_SIZE, "%#018llx\n", level); 321 return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
304} 322}
305 323
@@ -320,8 +338,9 @@ static ssize_t system_level_store(struct kobject *kobj,
320 if (*endp) 338 if (*endp)
321 return -EINVAL; 339 return -EINVAL;
322 340
341 mutex_lock(&sclp_cpi_mutex);
323 system_level = level; 342 system_level = level;
324 343 mutex_unlock(&sclp_cpi_mutex);
325 return len; 344 return len;
326} 345}
327 346
@@ -334,7 +353,9 @@ static ssize_t set_store(struct kobject *kobj,
334{ 353{
335 int rc; 354 int rc;
336 355
356 mutex_lock(&sclp_cpi_mutex);
337 rc = cpi_req(); 357 rc = cpi_req();
358 mutex_unlock(&sclp_cpi_mutex);
338 if (rc) 359 if (rc)
339 return rc; 360 return rc;
340 361
@@ -373,12 +394,16 @@ int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
373 if (rc) 394 if (rc)
374 return rc; 395 return rc;
375 396
397 mutex_lock(&sclp_cpi_mutex);
376 set_string(system_name, system); 398 set_string(system_name, system);
377 set_string(sysplex_name, sysplex); 399 set_string(sysplex_name, sysplex);
378 set_string(system_type, type); 400 set_string(system_type, type);
379 system_level = level; 401 system_level = level;
380 402
381 return cpi_req(); 403 rc = cpi_req();
404 mutex_unlock(&sclp_cpi_mutex);
405
406 return rc;
382} 407}
383EXPORT_SYMBOL(sclp_cpi_set_data); 408EXPORT_SYMBOL(sclp_cpi_set_data);
384 409
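The sclp_cpi_mutex additions all follow one pattern: every sysfs show/store and the exported setter take the same mutex, so a reader can never see a half-updated identifier and concurrent cpi_req() calls are serialized. The show side, reduced to its skeleton (kernel-style sketch with a hypothetical attribute name):

#include <linux/mutex.h>
#include <linux/kobject.h>

static DEFINE_MUTEX(attr_mutex);
static char name_buf[9];	/* 8 characters + NUL, as for CPI names */

static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *page)
{
	int rc;

	/* Snapshot the string under the lock so a concurrent store
	 * cannot hand us a half-updated value.
	 */
	mutex_lock(&attr_mutex);
	rc = snprintf(page, PAGE_SIZE, "%s\n", name_buf);
	mutex_unlock(&attr_mutex);
	return rc;
}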
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 45ff25e787cb..84c191c1cd62 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -51,13 +51,7 @@ static struct sclp_register sclp_quiesce_event = {
51static int __init 51static int __init
52sclp_quiesce_init(void) 52sclp_quiesce_init(void)
53{ 53{
54 int rc; 54 return sclp_register(&sclp_quiesce_event);
55
56 rc = sclp_register(&sclp_quiesce_event);
57 if (rc)
58 printk(KERN_WARNING "sclp: could not register quiesce handler "
59 "(rc=%d)\n", rc);
60 return rc;
61} 55}
62 56
63module_init(sclp_quiesce_init); 57module_init(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index da09781b32f7..710af42603f8 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -19,8 +19,6 @@
19#include "sclp.h" 19#include "sclp.h"
20#include "sclp_rw.h" 20#include "sclp_rw.h"
21 21
22#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
23
24/* 22/*
25 * The room for the SCCB (only for writing) is not equal to a pages size 23 * The room for the SCCB (only for writing) is not equal to a pages size
26 * (as it is specified as the maximum size in the SCLP documentation) 24 * (as it is specified as the maximum size in the SCLP documentation)
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 1c064976b32b..8b854857ba07 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -239,10 +239,8 @@ int __init sclp_sdias_init(void)
239 debug_register_view(sdias_dbf, &debug_sprintf_view); 239 debug_register_view(sdias_dbf, &debug_sprintf_view);
240 debug_set_level(sdias_dbf, 6); 240 debug_set_level(sdias_dbf, 6);
241 rc = sclp_register(&sclp_sdias_register); 241 rc = sclp_register(&sclp_sdias_register);
242 if (rc) { 242 if (rc)
243 ERROR_MSG("sclp register failed\n");
244 return rc; 243 return rc;
245 }
246 init_waitqueue_head(&sdias_wq); 244 init_waitqueue_head(&sdias_wq);
247 TRACE("init done\n"); 245 TRACE("init done\n");
248 return 0; 246 return 0;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 40b11521cd20..434ba04b1309 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -13,7 +13,6 @@
13#include <linux/tty.h> 13#include <linux/tty.h>
14#include <linux/tty_driver.h> 14#include <linux/tty_driver.h>
15#include <linux/tty_flip.h> 15#include <linux/tty_flip.h>
16#include <linux/wait.h>
17#include <linux/slab.h> 16#include <linux/slab.h>
18#include <linux/err.h> 17#include <linux/err.h>
19#include <linux/init.h> 18#include <linux/init.h>
@@ -25,8 +24,6 @@
25#include "sclp_rw.h" 24#include "sclp_rw.h"
26#include "sclp_tty.h" 25#include "sclp_tty.h"
27 26
28#define SCLP_TTY_PRINT_HEADER "sclp tty driver: "
29
30/* 27/*
31 * size of a buffer that collects single characters coming in 28 * size of a buffer that collects single characters coming in
32 * via sclp_tty_put_char() 29 * via sclp_tty_put_char()
@@ -50,8 +47,6 @@ static int sclp_tty_buffer_count;
50static struct sclp_buffer *sclp_ttybuf; 47static struct sclp_buffer *sclp_ttybuf;
51/* Timer for delayed output of console messages. */ 48/* Timer for delayed output of console messages. */
52static struct timer_list sclp_tty_timer; 49static struct timer_list sclp_tty_timer;
53/* Waitqueue to wait for buffers to get empty. */
54static wait_queue_head_t sclp_tty_waitq;
55 50
56static struct tty_struct *sclp_tty; 51static struct tty_struct *sclp_tty;
57static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; 52static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
@@ -59,19 +54,11 @@ static unsigned short int sclp_tty_chars_count;
59 54
60struct tty_driver *sclp_tty_driver; 55struct tty_driver *sclp_tty_driver;
61 56
62static struct sclp_ioctls sclp_ioctls; 57static int sclp_tty_tolower;
63static struct sclp_ioctls sclp_ioctls_init = 58static int sclp_tty_columns = 80;
64{ 59
65 8, /* 1 hor. tab. = 8 spaces */ 60#define SPACES_PER_TAB 8
66 0, /* no echo of input by this driver */ 61#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
67 80, /* 80 characters/line */
68 1, /* write after 1/10 s without final new line */
69 MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */
70 MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */
71 0, /* do not convert to lower case */
 72 0x6c /* to separate upper and lower case */
73 /* ('%' in EBCDIC) */
74};
75 62
76/* This routine is called whenever we try to open a SCLP terminal. */ 63/* This routine is called whenever we try to open a SCLP terminal. */
77static int 64static int
@@ -92,136 +79,6 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp)
92 sclp_tty = NULL; 79 sclp_tty = NULL;
93} 80}
94 81
95/* execute commands to control the i/o behaviour of the SCLP tty at runtime */
96static int
97sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
98 unsigned int cmd, unsigned long arg)
99{
100 unsigned long flags;
101 unsigned int obuf;
102 int check;
103 int rc;
104
105 if (tty->flags & (1 << TTY_IO_ERROR))
106 return -EIO;
107 rc = 0;
108 check = 0;
109 switch (cmd) {
110 case TIOCSCLPSHTAB:
111 /* set width of horizontal tab */
112 if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg))
113 rc = -EFAULT;
114 else
115 check = 1;
116 break;
117 case TIOCSCLPGHTAB:
118 /* get width of horizontal tab */
119 if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg))
120 rc = -EFAULT;
121 break;
122 case TIOCSCLPSECHO:
123 /* enable/disable echo of input */
124 if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg))
125 rc = -EFAULT;
126 break;
127 case TIOCSCLPGECHO:
128 /* Is echo of input enabled ? */
129 if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg))
130 rc = -EFAULT;
131 break;
132 case TIOCSCLPSCOLS:
133 /* set number of columns for output */
134 if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg))
135 rc = -EFAULT;
136 else
137 check = 1;
138 break;
139 case TIOCSCLPGCOLS:
140 /* get number of columns for output */
141 if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg))
142 rc = -EFAULT;
143 break;
144 case TIOCSCLPSNL:
145 /* enable/disable writing without final new line character */
146 if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg))
147 rc = -EFAULT;
148 break;
149 case TIOCSCLPGNL:
150 /* Is writing without final new line character enabled ? */
151 if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg))
152 rc = -EFAULT;
153 break;
154 case TIOCSCLPSOBUF:
155 /*
156 * set the maximum buffers size for output, will be rounded
157 * up to next 4kB boundary and stored as number of SCCBs
158 * (4kB Buffers) limitation: 256 x 4kB
159 */
160 if (get_user(obuf, (unsigned int __user *) arg) == 0) {
161 if (obuf & 0xFFF)
162 sclp_ioctls.max_sccb = (obuf >> 12) + 1;
163 else
164 sclp_ioctls.max_sccb = (obuf >> 12);
165 } else
166 rc = -EFAULT;
167 break;
168 case TIOCSCLPGOBUF:
169 /* get the maximum buffers size for output */
170 obuf = sclp_ioctls.max_sccb << 12;
171 if (put_user(obuf, (unsigned int __user *) arg))
172 rc = -EFAULT;
173 break;
174 case TIOCSCLPGKBUF:
175 /* get the number of buffers got from kernel at startup */
176 if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg))
177 rc = -EFAULT;
178 break;
179 case TIOCSCLPSCASE:
180 /* enable/disable conversion from upper to lower case */
181 if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
182 rc = -EFAULT;
183 break;
184 case TIOCSCLPGCASE:
185 /* Is conversion from upper to lower case of input enabled? */
186 if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
187 rc = -EFAULT;
188 break;
189 case TIOCSCLPSDELIM:
190 /*
191 * set special character used for separating upper and
192 * lower case, 0x00 disables this feature
193 */
194 if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg))
195 rc = -EFAULT;
196 break;
197 case TIOCSCLPGDELIM:
198 /*
199 * get special character used for separating upper and
200 * lower case, 0x00 disables this feature
201 */
202 if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg))
203 rc = -EFAULT;
204 break;
205 case TIOCSCLPSINIT:
206 /* set initial (default) sclp ioctls */
207 sclp_ioctls = sclp_ioctls_init;
208 check = 1;
209 break;
210 default:
211 rc = -ENOIOCTLCMD;
212 break;
213 }
214 if (check) {
215 spin_lock_irqsave(&sclp_tty_lock, flags);
216 if (sclp_ttybuf != NULL) {
217 sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab);
218 sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns);
219 }
220 spin_unlock_irqrestore(&sclp_tty_lock, flags);
221 }
222 return rc;
223}
224
225/* 82/*
226 * This routine returns the numbers of characters the tty driver 83 * This routine returns the numbers of characters the tty driver
227 * will accept for queuing to be written. This number is subject 84 * will accept for queuing to be written. This number is subject
@@ -268,7 +125,6 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
268 struct sclp_buffer, list); 125 struct sclp_buffer, list);
269 spin_unlock_irqrestore(&sclp_tty_lock, flags); 126 spin_unlock_irqrestore(&sclp_tty_lock, flags);
270 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); 127 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
271 wake_up(&sclp_tty_waitq);
272 /* check if the tty needs a wake up call */ 128 /* check if the tty needs a wake up call */
273 if (sclp_tty != NULL) { 129 if (sclp_tty != NULL) {
274 tty_wakeup(sclp_tty); 130 tty_wakeup(sclp_tty);
@@ -316,37 +172,37 @@ sclp_tty_timeout(unsigned long data)
316/* 172/*
317 * Write a string to the sclp tty. 173 * Write a string to the sclp tty.
318 */ 174 */
319static void 175static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
320sclp_tty_write_string(const unsigned char *str, int count)
321{ 176{
322 unsigned long flags; 177 unsigned long flags;
323 void *page; 178 void *page;
324 int written; 179 int written;
180 int overall_written;
325 struct sclp_buffer *buf; 181 struct sclp_buffer *buf;
326 182
327 if (count <= 0) 183 if (count <= 0)
328 return; 184 return 0;
185 overall_written = 0;
329 spin_lock_irqsave(&sclp_tty_lock, flags); 186 spin_lock_irqsave(&sclp_tty_lock, flags);
330 do { 187 do {
331 /* Create a sclp output buffer if none exists yet */ 188 /* Create a sclp output buffer if none exists yet */
332 if (sclp_ttybuf == NULL) { 189 if (sclp_ttybuf == NULL) {
333 while (list_empty(&sclp_tty_pages)) { 190 while (list_empty(&sclp_tty_pages)) {
334 spin_unlock_irqrestore(&sclp_tty_lock, flags); 191 spin_unlock_irqrestore(&sclp_tty_lock, flags);
335 if (in_interrupt()) 192 if (may_fail)
336 sclp_sync_wait(); 193 goto out;
337 else 194 else
338 wait_event(sclp_tty_waitq, 195 sclp_sync_wait();
339 !list_empty(&sclp_tty_pages));
340 spin_lock_irqsave(&sclp_tty_lock, flags); 196 spin_lock_irqsave(&sclp_tty_lock, flags);
341 } 197 }
342 page = sclp_tty_pages.next; 198 page = sclp_tty_pages.next;
343 list_del((struct list_head *) page); 199 list_del((struct list_head *) page);
344 sclp_ttybuf = sclp_make_buffer(page, 200 sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
345 sclp_ioctls.columns, 201 SPACES_PER_TAB);
346 sclp_ioctls.htab);
347 } 202 }
348 /* try to write the string to the current output buffer */ 203 /* try to write the string to the current output buffer */
349 written = sclp_write(sclp_ttybuf, str, count); 204 written = sclp_write(sclp_ttybuf, str, count);
205 overall_written += written;
350 if (written == count) 206 if (written == count)
351 break; 207 break;
352 /* 208 /*
@@ -363,27 +219,17 @@ sclp_tty_write_string(const unsigned char *str, int count)
363 count -= written; 219 count -= written;
364 } while (count > 0); 220 } while (count > 0);
365 /* Setup timer to output current console buffer after 1/10 second */ 221 /* Setup timer to output current console buffer after 1/10 second */
366 if (sclp_ioctls.final_nl) { 222 if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
367 if (sclp_ttybuf != NULL && 223 !timer_pending(&sclp_tty_timer)) {
368 sclp_chars_in_buffer(sclp_ttybuf) != 0 && 224 init_timer(&sclp_tty_timer);
369 !timer_pending(&sclp_tty_timer)) { 225 sclp_tty_timer.function = sclp_tty_timeout;
370 init_timer(&sclp_tty_timer); 226 sclp_tty_timer.data = 0UL;
371 sclp_tty_timer.function = sclp_tty_timeout; 227 sclp_tty_timer.expires = jiffies + HZ/10;
372 sclp_tty_timer.data = 0UL; 228 add_timer(&sclp_tty_timer);
373 sclp_tty_timer.expires = jiffies + HZ/10;
374 add_timer(&sclp_tty_timer);
375 }
376 } else {
377 if (sclp_ttybuf != NULL &&
378 sclp_chars_in_buffer(sclp_ttybuf) != 0) {
379 buf = sclp_ttybuf;
380 sclp_ttybuf = NULL;
381 spin_unlock_irqrestore(&sclp_tty_lock, flags);
382 __sclp_ttybuf_emit(buf);
383 spin_lock_irqsave(&sclp_tty_lock, flags);
384 }
385 } 229 }
386 spin_unlock_irqrestore(&sclp_tty_lock, flags); 230 spin_unlock_irqrestore(&sclp_tty_lock, flags);
231out:
232 return overall_written;
387} 233}
388 234
389/* 235/*
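The new may_fail parameter and the overall_written return value align sclp_tty_write_string() with the tty write contract: report how many bytes were actually accepted and let the caller re-queue the rest, instead of claiming the full count. A standalone model of that contract (toy buffer size, invented names):

#include <stdio.h>
#include <string.h>

static char outbuf[16];
static size_t used;

/* Accept as much as fits and report it; never block, never lie. */
static int tty_style_write(const char *str, size_t count)
{
	size_t room = sizeof(outbuf) - used;
	size_t n = count < room ? count : room;

	memcpy(outbuf + used, str, n);
	used += n;
	return (int)n;	/* may be short of count */
}

int main(void)
{
	const char *msg = "hello, sclp tty world";
	size_t off = 0, len = strlen(msg);

	while (off < len) {
		int n = tty_style_write(msg + off, len - off);
		if (n == 0) {		/* buffer full: drain and retry */
			used = 0;	/* stands in for emitting the buffer */
			continue;
		}
		off += (size_t)n;
	}
	return 0;
}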
@@ -395,11 +241,10 @@ static int
395sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) 241sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
396{ 242{
397 if (sclp_tty_chars_count > 0) { 243 if (sclp_tty_chars_count > 0) {
398 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 244 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
399 sclp_tty_chars_count = 0; 245 sclp_tty_chars_count = 0;
400 } 246 }
401 sclp_tty_write_string(buf, count); 247 return sclp_tty_write_string(buf, count, 1);
402 return count;
403} 248}
404 249
405/* 250/*
@@ -417,9 +262,10 @@ sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
417{ 262{
418 sclp_tty_chars[sclp_tty_chars_count++] = ch; 263 sclp_tty_chars[sclp_tty_chars_count++] = ch;
419 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { 264 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
420 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 265 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
421 sclp_tty_chars_count = 0; 266 sclp_tty_chars_count = 0;
422 } return 1; 267 }
268 return 1;
423} 269}
424 270
425/* 271/*
@@ -430,7 +276,7 @@ static void
430sclp_tty_flush_chars(struct tty_struct *tty) 276sclp_tty_flush_chars(struct tty_struct *tty)
431{ 277{
432 if (sclp_tty_chars_count > 0) { 278 if (sclp_tty_chars_count > 0) {
433 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 279 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
434 sclp_tty_chars_count = 0; 280 sclp_tty_chars_count = 0;
435 } 281 }
436} 282}
@@ -469,7 +315,7 @@ static void
469sclp_tty_flush_buffer(struct tty_struct *tty) 315sclp_tty_flush_buffer(struct tty_struct *tty)
470{ 316{
471 if (sclp_tty_chars_count > 0) { 317 if (sclp_tty_chars_count > 0) {
472 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 318 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
473 sclp_tty_chars_count = 0; 319 sclp_tty_chars_count = 0;
474 } 320 }
475} 321}
@@ -517,9 +363,7 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
 517 * modify original string, 363 * modify original string,
518 * returns length of resulting string 364 * returns length of resulting string
519 */ 365 */
520static int 366static int sclp_switch_cases(unsigned char *buf, int count)
521sclp_switch_cases(unsigned char *buf, int count,
522 unsigned char delim, int tolower)
523{ 367{
524 unsigned char *ip, *op; 368 unsigned char *ip, *op;
525 int toggle; 369 int toggle;
@@ -529,9 +373,9 @@ sclp_switch_cases(unsigned char *buf, int count,
529 ip = op = buf; 373 ip = op = buf;
530 while (count-- > 0) { 374 while (count-- > 0) {
531 /* compare with special character */ 375 /* compare with special character */
532 if (*ip == delim) { 376 if (*ip == CASE_DELIMITER) {
533 /* followed by another special character? */ 377 /* followed by another special character? */
534 if (count && ip[1] == delim) { 378 if (count && ip[1] == CASE_DELIMITER) {
535 /* 379 /*
536 * ... then put a single copy of the special 380 * ... then put a single copy of the special
537 * character to the output string 381 * character to the output string
@@ -550,7 +394,7 @@ sclp_switch_cases(unsigned char *buf, int count,
550 /* not the special character */ 394 /* not the special character */
551 if (toggle) 395 if (toggle)
552 /* but case switching is on */ 396 /* but case switching is on */
553 if (tolower) 397 if (sclp_tty_tolower)
554 /* switch to uppercase */ 398 /* switch to uppercase */
555 *op++ = _ebc_toupper[(int) *ip++]; 399 *op++ = _ebc_toupper[(int) *ip++];
556 else 400 else
@@ -570,30 +414,12 @@ sclp_get_input(unsigned char *start, unsigned char *end)
570 int count; 414 int count;
571 415
572 count = end - start; 416 count = end - start;
573 /* 417 if (sclp_tty_tolower)
574 * if set in ioctl convert EBCDIC to lower case
575 * (modify original input in SCCB)
576 */
577 if (sclp_ioctls.tolower)
578 EBC_TOLOWER(start, count); 418 EBC_TOLOWER(start, count);
579 419 count = sclp_switch_cases(start, count);
580 /*
581 * if set in ioctl find out characters in lower or upper case
582 * (depends on current case) separated by a special character,
583 * works on EBCDIC
584 */
585 if (sclp_ioctls.delim)
586 count = sclp_switch_cases(start, count,
587 sclp_ioctls.delim,
588 sclp_ioctls.tolower);
589
590 /* convert EBCDIC to ASCII (modify original input in SCCB) */ 420 /* convert EBCDIC to ASCII (modify original input in SCCB) */
591 sclp_ebcasc_str(start, count); 421 sclp_ebcasc_str(start, count);
592 422
593 /* if set in ioctl write operators input to console */
594 if (sclp_ioctls.echo)
595 sclp_tty_write(sclp_tty, start, count);
596
597 /* transfer input to high level driver */ 423 /* transfer input to high level driver */
598 sclp_tty_input(start, count); 424 sclp_tty_input(start, count);
599} 425}
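With the ioctl knobs gone, the delimiter and case handling are compile-time fixed, but the algorithm is unchanged: the delimiter toggles case switching, and a doubled delimiter emits one literal delimiter. A standalone model (ASCII toupper() stands in for the kernel's EBCDIC tables, '%' for the EBCDIC 0x6c delimiter):

#include <ctype.h>
#include <stdio.h>

#define CASE_DELIMITER '%'	/* ASCII stand-in for EBCDIC 0x6c */

static int switch_cases(char *buf, int count)
{
	char *ip = buf, *op = buf;
	int toggle = 0;

	while (count-- > 0) {
		if (*ip == CASE_DELIMITER) {
			if (count && ip[1] == CASE_DELIMITER) {
				/* doubled delimiter: emit one copy */
				*op++ = *ip;
				ip += 2;
				count--;
			} else {
				/* single delimiter: toggle switching */
				toggle = !toggle;
				ip++;
			}
			continue;
		}
		if (toggle)
			*op++ = (char)toupper((unsigned char)*ip++);
		else
			*op++ = *ip++;
	}
	return op - buf;
}

int main(void)
{
	char line[] = "echo %mixed% and %%literal";
	int n = switch_cases(line, (int)(sizeof(line) - 1));

	printf("%.*s\n", n, line);	/* -> "echo MIXED and %literal" */
	return 0;
}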
@@ -717,7 +543,6 @@ static const struct tty_operations sclp_ops = {
717 .write_room = sclp_tty_write_room, 543 .write_room = sclp_tty_write_room,
718 .chars_in_buffer = sclp_tty_chars_in_buffer, 544 .chars_in_buffer = sclp_tty_chars_in_buffer,
719 .flush_buffer = sclp_tty_flush_buffer, 545 .flush_buffer = sclp_tty_flush_buffer,
720 .ioctl = sclp_tty_ioctl,
721}; 546};
722 547
723static int __init 548static int __init
@@ -736,9 +561,6 @@ sclp_tty_init(void)
736 561
737 rc = sclp_rw_init(); 562 rc = sclp_rw_init();
738 if (rc) { 563 if (rc) {
739 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
740 "could not register tty - "
741 "sclp_rw_init returned %d\n", rc);
742 put_tty_driver(driver); 564 put_tty_driver(driver);
743 return rc; 565 return rc;
744 } 566 }
@@ -754,7 +576,6 @@ sclp_tty_init(void)
754 } 576 }
755 INIT_LIST_HEAD(&sclp_tty_outqueue); 577 INIT_LIST_HEAD(&sclp_tty_outqueue);
756 spin_lock_init(&sclp_tty_lock); 578 spin_lock_init(&sclp_tty_lock);
757 init_waitqueue_head(&sclp_tty_waitq);
758 init_timer(&sclp_tty_timer); 579 init_timer(&sclp_tty_timer);
759 sclp_ttybuf = NULL; 580 sclp_ttybuf = NULL;
760 sclp_tty_buffer_count = 0; 581 sclp_tty_buffer_count = 0;
@@ -763,11 +584,10 @@ sclp_tty_init(void)
763 * save 4 characters for the CPU number 584 * save 4 characters for the CPU number
764 * written at start of each line by VM/CP 585 * written at start of each line by VM/CP
765 */ 586 */
766 sclp_ioctls_init.columns = 76; 587 sclp_tty_columns = 76;
767 /* case input lines to lowercase */ 588 /* case input lines to lowercase */
768 sclp_ioctls_init.tolower = 1; 589 sclp_tty_tolower = 1;
769 } 590 }
770 sclp_ioctls = sclp_ioctls_init;
771 sclp_tty_chars_count = 0; 591 sclp_tty_chars_count = 0;
772 sclp_tty = NULL; 592 sclp_tty = NULL;
773 593
@@ -792,9 +612,6 @@ sclp_tty_init(void)
792 tty_set_operations(driver, &sclp_ops); 612 tty_set_operations(driver, &sclp_ops);
793 rc = tty_register_driver(driver); 613 rc = tty_register_driver(driver);
794 if (rc) { 614 if (rc) {
795 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
796 "could not register tty - "
797 "tty_register_driver returned %d\n", rc);
798 put_tty_driver(driver); 615 put_tty_driver(driver);
799 return rc; 616 return rc;
800 } 617 }
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
index 0ce2c1fc5340..4b965b22fecd 100644
--- a/drivers/s390/char/sclp_tty.h
+++ b/drivers/s390/char/sclp_tty.h
@@ -11,61 +11,8 @@
11#ifndef __SCLP_TTY_H__ 11#ifndef __SCLP_TTY_H__
12#define __SCLP_TTY_H__ 12#define __SCLP_TTY_H__
13 13
14#include <linux/ioctl.h>
15#include <linux/termios.h>
16#include <linux/tty_driver.h> 14#include <linux/tty_driver.h>
17 15
18/* This is the type of data structures storing sclp ioctl setting. */
19struct sclp_ioctls {
20 unsigned short htab;
21 unsigned char echo;
22 unsigned short columns;
23 unsigned char final_nl;
24 unsigned short max_sccb;
25 unsigned short kmem_sccb; /* can't be modified at run time */
26 unsigned char tolower;
27 unsigned char delim;
28};
29
30/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
31#define SCLP_IOCTL_LETTER 'B'
32
33/* set width of horizontal tabulator */
34#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
35/* enable/disable echo of input (independent from line discipline) */
36#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
37/* set number of columns for output */
38#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
39/* enable/disable writing without final new line character */
40#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char)
41/* set the maximum buffers size for output, rounded up to next 4kB boundary */
42#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
43/* set initial (default) sclp ioctls */
44#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6)
45/* enable/disable conversion from upper to lower case of input */
46#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
47/* set special character used for separating upper and lower case, */
48/* 0x00 disables this feature */
49#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char)
50
51/* get width of horizontal tabulator */
52#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
53/* Is echo of input enabled ? (independent from line discipline) */
54#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
55/* get number of colums for output */
56#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
57/* Is writing without final new line character enabled ? */
58#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char)
59/* get the maximum buffers size for output */
60#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
61/* Is conversion from upper to lower case of input enabled ? */
62#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
63/* get special character used for separating upper and lower case, */
64/* 0x00 disables this feature */
65#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
66/* get the number of buffers/pages got from kernel at startup */
67#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short)
68
69extern struct tty_driver *sclp_tty_driver; 16extern struct tty_driver *sclp_tty_driver;
70 17
71#endif /* __SCLP_TTY_H__ */ 18#endif /* __SCLP_TTY_H__ */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 3e577f655b18..ad51738c4261 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -27,7 +27,6 @@
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28#include "sclp.h" 28#include "sclp.h"
29 29
30#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
31#define SCLP_VT220_MAJOR TTY_MAJOR 30#define SCLP_VT220_MAJOR TTY_MAJOR
32#define SCLP_VT220_MINOR 65 31#define SCLP_VT220_MINOR 65
33#define SCLP_VT220_DRIVER_NAME "sclp_vt220" 32#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
@@ -82,8 +81,8 @@ static struct sclp_vt220_request *sclp_vt220_current_request;
82/* Number of characters in current request buffer */ 81/* Number of characters in current request buffer */
83static int sclp_vt220_buffered_chars; 82static int sclp_vt220_buffered_chars;
84 83
85/* Flag indicating whether this driver has already been initialized */ 84/* Counter controlling core driver initialization. */
86static int sclp_vt220_initialized = 0; 85static int __initdata sclp_vt220_init_count;
87 86
88/* Flag indicating that sclp_vt220_current_request should really 87/* Flag indicating that sclp_vt220_current_request should really
89 * have been already queued but wasn't because the SCLP was processing 88 * have been already queued but wasn't because the SCLP was processing
@@ -609,10 +608,8 @@ sclp_vt220_flush_buffer(struct tty_struct *tty)
609 sclp_vt220_emit_current(); 608 sclp_vt220_emit_current();
610} 609}
611 610
612/* 611/* Release allocated pages. */
613 * Initialize all relevant components and register driver with system. 612static void __init __sclp_vt220_free_pages(void)
614 */
615static void __init __sclp_vt220_cleanup(void)
616{ 613{
617 struct list_head *page, *p; 614 struct list_head *page, *p;
618 615
@@ -623,21 +620,30 @@ static void __init __sclp_vt220_cleanup(void)
623 else 620 else
624 free_bootmem((unsigned long) page, PAGE_SIZE); 621 free_bootmem((unsigned long) page, PAGE_SIZE);
625 } 622 }
626 if (!list_empty(&sclp_vt220_register.list))
627 sclp_unregister(&sclp_vt220_register);
628 sclp_vt220_initialized = 0;
629} 623}
630 624
631static int __init __sclp_vt220_init(void) 625/* Release memory and unregister from sclp core. Controlled by init counting -
626 * only the last invoker will actually perform these actions. */
627static void __init __sclp_vt220_cleanup(void)
628{
629 sclp_vt220_init_count--;
630 if (sclp_vt220_init_count != 0)
631 return;
632 sclp_unregister(&sclp_vt220_register);
633 __sclp_vt220_free_pages();
634}
635
636/* Allocate buffer pages and register with sclp core. Controlled by init
637 * counting - only the first invoker will actually perform these actions. */
638static int __init __sclp_vt220_init(int num_pages)
632{ 639{
633 void *page; 640 void *page;
634 int i; 641 int i;
635 int num_pages;
636 int rc; 642 int rc;
637 643
638 if (sclp_vt220_initialized) 644 sclp_vt220_init_count++;
645 if (sclp_vt220_init_count != 1)
639 return 0; 646 return 0;
640 sclp_vt220_initialized = 1;
641 spin_lock_init(&sclp_vt220_lock); 647 spin_lock_init(&sclp_vt220_lock);
642 INIT_LIST_HEAD(&sclp_vt220_empty); 648 INIT_LIST_HEAD(&sclp_vt220_empty);
643 INIT_LIST_HEAD(&sclp_vt220_outqueue); 649 INIT_LIST_HEAD(&sclp_vt220_outqueue);
@@ -649,24 +655,22 @@ static int __init __sclp_vt220_init(void)
649 sclp_vt220_flush_later = 0; 655 sclp_vt220_flush_later = 0;
650 656
651 /* Allocate pages for output buffering */ 657 /* Allocate pages for output buffering */
652 num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES;
653 for (i = 0; i < num_pages; i++) { 658 for (i = 0; i < num_pages; i++) {
654 if (slab_is_available()) 659 if (slab_is_available())
655 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 660 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
656 else 661 else
657 page = alloc_bootmem_low_pages(PAGE_SIZE); 662 page = alloc_bootmem_low_pages(PAGE_SIZE);
658 if (!page) { 663 if (!page) {
659 __sclp_vt220_cleanup(); 664 rc = -ENOMEM;
660 return -ENOMEM; 665 goto out;
661 } 666 }
662 list_add_tail((struct list_head *) page, &sclp_vt220_empty); 667 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
663 } 668 }
664 rc = sclp_register(&sclp_vt220_register); 669 rc = sclp_register(&sclp_vt220_register);
670out:
665 if (rc) { 671 if (rc) {
666 printk(KERN_ERR SCLP_VT220_PRINT_HEADER 672 __sclp_vt220_free_pages();
667 "could not register vt220 - " 673 sclp_vt220_init_count--;
668 "sclp_register returned %d\n", rc);
669 __sclp_vt220_cleanup();
670 } 674 }
671 return rc; 675 return rc;
672} 676}
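Replacing the sclp_vt220_initialized flag with a counter lets the console (early) and the tty (later) call init and cleanup symmetrically: only the first init allocates and registers, only the last cleanup tears down. The counting logic in isolation (printf standing in for the real allocate/register work):

#include <stdio.h>

static int init_count;

static int subsys_init(void)
{
	init_count++;
	if (init_count != 1)
		return 0;	/* already set up by an earlier caller */
	printf("allocate pages, sclp_register()\n");
	return 0;
}

static void subsys_cleanup(void)
{
	init_count--;
	if (init_count != 0)
		return;		/* another user still holds the init */
	printf("sclp_unregister(), free pages\n");
}

int main(void)
{
	subsys_init();		/* console init: does the real work */
	subsys_init();		/* tty init: counted, otherwise a no-op */
	subsys_cleanup();	/* tty teardown: counted no-op */
	subsys_cleanup();	/* last user: real teardown */
	return 0;
}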
@@ -689,15 +693,13 @@ static int __init sclp_vt220_tty_init(void)
689{ 693{
690 struct tty_driver *driver; 694 struct tty_driver *driver;
691 int rc; 695 int rc;
692 int cleanup;
693 696
694 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve 697 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
695 * symmetry between VM and LPAR systems regarding ttyS1. */ 698 * symmetry between VM and LPAR systems regarding ttyS1. */
696 driver = alloc_tty_driver(1); 699 driver = alloc_tty_driver(1);
697 if (!driver) 700 if (!driver)
698 return -ENOMEM; 701 return -ENOMEM;
699 cleanup = !sclp_vt220_initialized; 702 rc = __sclp_vt220_init(MAX_KMEM_PAGES);
700 rc = __sclp_vt220_init();
701 if (rc) 703 if (rc)
702 goto out_driver; 704 goto out_driver;
703 705
@@ -713,18 +715,13 @@ static int __init sclp_vt220_tty_init(void)
713 tty_set_operations(driver, &sclp_vt220_ops); 715 tty_set_operations(driver, &sclp_vt220_ops);
714 716
715 rc = tty_register_driver(driver); 717 rc = tty_register_driver(driver);
716 if (rc) { 718 if (rc)
717 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
718 "could not register tty - "
719 "tty_register_driver returned %d\n", rc);
720 goto out_init; 719 goto out_init;
721 }
722 sclp_vt220_driver = driver; 720 sclp_vt220_driver = driver;
723 return 0; 721 return 0;
724 722
725out_init: 723out_init:
726 if (cleanup) 724 __sclp_vt220_cleanup();
727 __sclp_vt220_cleanup();
728out_driver: 725out_driver:
729 put_tty_driver(driver); 726 put_tty_driver(driver);
730 return rc; 727 return rc;
@@ -773,10 +770,9 @@ sclp_vt220_con_init(void)
773{ 770{
774 int rc; 771 int rc;
775 772
776 INIT_LIST_HEAD(&sclp_vt220_register.list);
777 if (!CONSOLE_IS_SCLP) 773 if (!CONSOLE_IS_SCLP)
778 return 0; 774 return 0;
779 rc = __sclp_vt220_init(); 775 rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
780 if (rc) 776 if (rc)
781 return rc; 777 return rc;
782 /* Attach linux console */ 778 /* Attach linux console */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 874adf365e46..22ca34361ed7 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -196,7 +196,7 @@ tape_34xx_erp_retry(struct tape_request *request)
196static int 196static int
197tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) 197tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
198{ 198{
199 if (irb->scsw.dstat == 0x85 /* READY */) { 199 if (irb->scsw.cmd.dstat == 0x85) { /* READY */
200 /* A medium was inserted in the drive. */ 200 /* A medium was inserted in the drive. */
201 DBF_EVENT(6, "xuud med\n"); 201 DBF_EVENT(6, "xuud med\n");
202 tape_34xx_delete_sbid_from(device, 0); 202 tape_34xx_delete_sbid_from(device, 0);
@@ -844,22 +844,22 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request,
844 if (request == NULL) 844 if (request == NULL)
845 return tape_34xx_unsolicited_irq(device, irb); 845 return tape_34xx_unsolicited_irq(device, irb);
846 846
847 if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && 847 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
848 (irb->scsw.dstat & DEV_STAT_DEV_END) && 848 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
849 (request->op == TO_WRI)) { 849 (request->op == TO_WRI)) {
850 /* Write at end of volume */ 850 /* Write at end of volume */
851 PRINT_INFO("End of volume\n"); /* XXX */ 851 PRINT_INFO("End of volume\n"); /* XXX */
852 return tape_34xx_erp_failed(request, -ENOSPC); 852 return tape_34xx_erp_failed(request, -ENOSPC);
853 } 853 }
854 854
855 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) 855 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
856 return tape_34xx_unit_check(device, request, irb); 856 return tape_34xx_unit_check(device, request, irb);
857 857
858 if (irb->scsw.dstat & DEV_STAT_DEV_END) { 858 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
859 /* 859 /*
860 * A unit exception occurs on skipping over a tapemark block. 860 * A unit exception occurs on skipping over a tapemark block.
861 */ 861 */
862 if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { 862 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
863 if (request->op == TO_BSB || request->op == TO_FSB) 863 if (request->op == TO_BSB || request->op == TO_FSB)
864 request->rescnt++; 864 request->rescnt++;
865 else 865 else
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 42ce7915fc5d..839987618ffd 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -837,13 +837,13 @@ tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
 static int
 tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
 {
-	if (irb->scsw.dstat == DEV_STAT_CHN_END)
+	if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
 		/* Probably result of halt ssch */
 		return TAPE_IO_PENDING;
-	else if (irb->scsw.dstat == 0x85)
+	else if (irb->scsw.cmd.dstat == 0x85)
 		/* Device Ready */
 		DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
-	else if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+	else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
 		tape_3590_schedule_work(device, TO_READ_ATTMSG);
 	} else {
 		DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
@@ -1515,18 +1515,19 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
 	if (request == NULL)
 		return tape_3590_unsolicited_irq(device, irb);
 
-	if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) &&
-	    (irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) {
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
+	    (request->op == TO_WRI)) {
 		/* Write at end of volume */
 		DBF_EVENT(2, "End of volume\n");
 		return tape_3590_erp_failed(device, request, irb, -ENOSPC);
 	}
 
-	if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
 		return tape_3590_unit_check(device, request, irb);
 
-	if (irb->scsw.dstat & DEV_STAT_DEV_END) {
-		if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
 			if (request->op == TO_FSB || request->op == TO_BSB)
 				request->rescnt++;
 			else
@@ -1536,12 +1537,12 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
 			return tape_3590_done(device, request);
 	}
 
-	if (irb->scsw.dstat & DEV_STAT_CHN_END) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
 		DBF_EVENT(2, "cannel end\n");
 		return TAPE_IO_PENDING;
 	}
 
-	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
 		DBF_EVENT(2, "Unit Attention when busy..\n");
 		return TAPE_IO_PENDING;
 	}
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index ebe84067bae9..687720b552d1 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/proc_fs.h>
 #include <linux/mtio.h>
+#include <linux/smp_lock.h>
 
 #include <asm/uaccess.h>
 
@@ -289,21 +290,26 @@ tapechar_open (struct inode *inode, struct file *filp)
 	if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
 		return -ENODEV;
 
+	lock_kernel();
 	minor = iminor(filp->f_path.dentry->d_inode);
 	device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
 	if (IS_ERR(device)) {
 		DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
-		return PTR_ERR(device);
+		rc = PTR_ERR(device);
+		goto out;
 	}
 
 
 	rc = tape_open(device);
 	if (rc == 0) {
 		filp->private_data = device;
-		return nonseekable_open(inode, filp);
+		rc = nonseekable_open(inode, filp);
 	}
-	tape_put_device(device);
+	else
+		tape_put_device(device);
 
+out:
+	unlock_kernel();
 	return rc;
 }
 
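
The lock_kernel()/unlock_kernel() pairs appearing in tapechar_open() above (and in several open() routines below) are the BKL push-down: the VFS no longer takes the big kernel lock around ->open(), so each driver that still depends on it serializes its own open path. A hedged sketch of the resulting pattern, with do_driver_setup() standing in for the device lookup:

static int sketch_open(struct inode *inode, struct file *filp)
{
	int rc;

	lock_kernel();
	rc = do_driver_setup(filp);	/* hypothetical per-device setup */
	if (rc == 0)
		rc = nonseekable_open(inode, filp);
	unlock_kernel();	/* single exit keeps the lock balanced */
	return rc;
}
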
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index c20e3c548343..181a5441af16 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -839,7 +839,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request,
 
 	PRINT_INFO("-------------------------------------------------\n");
 	PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
-		   irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
+		   irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa);
 	PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
 	if (request != NULL)
 		PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
@@ -867,7 +867,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
 	else
 		op = "---";
 	DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
-		  irb->scsw.dstat,irb->scsw.cstat);
+		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
 	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
 	sptr = (unsigned int *) irb->ecw;
 	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
@@ -1083,10 +1083,11 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 	 * error might still apply. So we just schedule the request to be
 	 * started later.
 	 */
-	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
+	if (irb->scsw.cmd.cc != 0 &&
+	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
 	    (request->status == TAPE_REQUEST_IN_IO)) {
 		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
-			  device->cdev_id, irb->scsw.cc, irb->scsw.fctl);
+			  device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
 		request->status = TAPE_REQUEST_QUEUED;
 		schedule_delayed_work(&device->tape_dnr, HZ);
 		return;
@@ -1094,8 +1095,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 
 	/* May be an unsolicited irq */
 	if(request != NULL)
-		request->rescnt = irb->scsw.count;
-	else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) &&
+		request->rescnt = irb->scsw.cmd.count;
+	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
 		 !list_empty(&device->req_queue)) {
 		/* Not Ready to Ready after long busy ? */
 		struct tape_request *req;
@@ -1111,7 +1112,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			return;
 		}
 	}
-	if (irb->scsw.dstat != 0x0c) {
+	if (irb->scsw.cmd.dstat != 0x0c) {
 		/* Set the 'ONLINE' flag depending on sense byte 1 */
 		if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
 			device->tape_generic_status |= GMT_ONLINE(~0);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 5043150019ac..a7fe6302c982 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -663,7 +663,7 @@ static int
 tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
 {
 	/* Handle ATTN. Schedule tasklet to read aid. */
-	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
 		if (!tp->throttle)
 			tty3270_issue_read(tp, 0);
 		else
@@ -671,11 +671,11 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
 	}
 
 	if (rq) {
-		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
 			rq->rc = -EIO;
 		else
 			/* Normal end. Copy residual count. */
-			rq->rescnt = irb->scsw.count;
+			rq->rescnt = irb->scsw.cmd.count;
 	}
 	return RAW3270_IO_DONE;
 }
@@ -1792,15 +1792,12 @@ static int __init tty3270_init(void)
 	tty_set_operations(driver, &tty3270_ops);
 	ret = tty_register_driver(driver);
 	if (ret) {
-		printk(KERN_ERR "tty3270 registration failed with %d\n", ret);
 		put_tty_driver(driver);
 		return ret;
 	}
 	tty3270_driver = driver;
 	ret = raw3270_register_notifier(tty3270_notifier);
 	if (ret) {
-		printk(KERN_ERR "tty3270 notifier registration failed "
-		       "with %d\n", ret);
 		put_tty_driver(driver);
 		return ret;
 
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 2f419b0ea628..09e7d9bf438b 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
+#include <linux/smp_lock.h>
 #include <asm/cpcmd.h>
 #include <asm/debug.h>
 #include <asm/uaccess.h>
@@ -39,11 +40,14 @@ static int vmcp_open(struct inode *inode, struct file *file)
 	session = kmalloc(sizeof(*session), GFP_KERNEL);
 	if (!session)
 		return -ENOMEM;
+
+	lock_kernel();
 	session->bufsize = PAGE_SIZE;
 	session->response = NULL;
 	session->resp_size = 0;
 	mutex_init(&session->mutex);
 	file->private_data = session;
+	unlock_kernel();
 	return nonseekable_open(inode, file);
 }
 
@@ -61,30 +65,24 @@ static int vmcp_release(struct inode *inode, struct file *file)
 static ssize_t
 vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
 {
-	size_t tocopy;
+	ssize_t ret;
+	size_t size;
 	struct vmcp_session *session;
 
-	session = (struct vmcp_session *)file->private_data;
+	session = file->private_data;
 	if (mutex_lock_interruptible(&session->mutex))
 		return -ERESTARTSYS;
 	if (!session->response) {
 		mutex_unlock(&session->mutex);
 		return 0;
 	}
-	if (*ppos > session->resp_size) {
-		mutex_unlock(&session->mutex);
-		return 0;
-	}
-	tocopy = min(session->resp_size - (size_t) (*ppos), count);
-	tocopy = min(tocopy, session->bufsize - (size_t) (*ppos));
+	size = min_t(size_t, session->resp_size, session->bufsize);
+	ret = simple_read_from_buffer(buff, count, ppos,
+				      session->response, size);
 
-	if (copy_to_user(buff, session->response + (*ppos), tocopy)) {
-		mutex_unlock(&session->mutex);
-		return -EFAULT;
-	}
 	mutex_unlock(&session->mutex);
-	*ppos += tocopy;
-	return tocopy;
+
+	return ret;
 }
 
 static ssize_t
@@ -198,27 +196,23 @@ static int __init vmcp_init(void)
 		PRINT_WARN("z/VM CP interface is only available under z/VM\n");
 		return -ENODEV;
 	}
+
 	vmcp_debug = debug_register("vmcp", 1, 1, 240);
-	if (!vmcp_debug) {
-		PRINT_ERR("z/VM CP interface not loaded. Could not register "
-			  "debug feature\n");
+	if (!vmcp_debug)
 		return -ENOMEM;
-	}
+
 	ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
 	if (ret) {
-		PRINT_ERR("z/VM CP interface not loaded. Could not register "
-			  "debug feature view. Error code: %d\n", ret);
 		debug_unregister(vmcp_debug);
 		return ret;
 	}
+
 	ret = misc_register(&vmcp_dev);
 	if (ret) {
-		PRINT_ERR("z/VM CP interface not loaded. Could not register "
-			  "misc device. Error code: %d\n", ret);
 		debug_unregister(vmcp_debug);
 		return ret;
 	}
-	PRINT_INFO("z/VM CP interface loaded\n");
+
 	return 0;
 }
 
@@ -226,7 +220,6 @@ static void __exit vmcp_exit(void)
 {
 	misc_deregister(&vmcp_dev);
 	debug_unregister(vmcp_debug);
-	PRINT_INFO("z/VM CP interface unloaded.\n");
 }
 
 module_init(vmcp_init);
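
The vmcp_read() rewrite above replaces hand-rolled *ppos checks plus copy_to_user() with simple_read_from_buffer(), which clamps the copy to the available bytes, copies to user space, advances *ppos and returns the byte count (or -EFAULT). A minimal sketch of the helper on a static buffer:

static ssize_t sketch_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	static const char msg[] = "hello\n";

	/* copies at most count bytes starting at *ppos, updates *ppos */
	return simple_read_from_buffer(buf, count, ppos, msg,
				       sizeof(msg) - 1);
}
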
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 2c2428cc05d8..c31faefa2b3b 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -25,6 +25,7 @@
 #include <linux/kmod.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/smp_lock.h>
 #include <linux/string.h>
 
 
@@ -216,9 +217,7 @@ static int vmlogrdr_get_recording_class_AB(void)
 	char *tail;
 	int len,i;
 
-	printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command);
 	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
-	printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response);
 	len = strnlen(cp_response,sizeof(cp_response));
 	// now the parsing
 	tail=strnchr(cp_response,len,'=');
@@ -268,11 +267,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
 				logptr->recording_name,
 				qid_string);
 
-		printk (KERN_DEBUG "vmlogrdr: recording command: %s\n",
-			cp_command);
 		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
-		printk (KERN_DEBUG "vmlogrdr: recording response: %s",
-			cp_response);
 	}
 
 	memset(cp_command, 0x00, sizeof(cp_command));
@@ -282,10 +277,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
 		onoff,
 		qid_string);
 
-	printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
 	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
-	printk (KERN_DEBUG "vmlogrdr: recording response: %s",
-		cp_response);
 	/* The recording command will usually answer with 'Command complete'
 	 * on success, but when the specific service was never connected
 	 * before then there might be an additional informational message
@@ -319,9 +311,11 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
 		return -ENOSYS;
 
 	/* Besure this device hasn't already been opened */
+	lock_kernel();
 	spin_lock_bh(&logptr->priv_lock);
 	if (logptr->dev_in_use) {
 		spin_unlock_bh(&logptr->priv_lock);
+		unlock_kernel();
 		return -EBUSY;
 	}
 	logptr->dev_in_use = 1;
@@ -365,7 +359,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
 		|| (logptr->iucv_path_severed));
 	if (logptr->iucv_path_severed)
 		goto out_record;
-	return nonseekable_open(inode, filp);
+	ret = nonseekable_open(inode, filp);
+	unlock_kernel();
+	return ret;
 
 out_record:
 	if (logptr->autorecording)
@@ -375,6 +371,7 @@ out_path:
 	logptr->path = NULL;
 out_dev:
 	logptr->dev_in_use = 0;
+	unlock_kernel();
 	return -EIO;
 }
 
@@ -567,10 +564,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
 			"RECORDING %s PURGE ",
 			priv->recording_name);
 
-	printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
 	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
-	printk (KERN_DEBUG "vmlogrdr: recording response: %s",
-		cp_response);
 
 	return count;
 }
@@ -682,28 +676,20 @@ static int vmlogrdr_register_driver(void)
 
 	/* Register with iucv driver */
 	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
-	if (ret) {
-		printk (KERN_ERR "vmlogrdr: failed to register with "
-			"iucv driver\n");
+	if (ret)
 		goto out;
-	}
 
 	ret = driver_register(&vmlogrdr_driver);
-	if (ret) {
-		printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
+	if (ret)
 		goto out_iucv;
-	}
 
 	ret = driver_create_file(&vmlogrdr_driver,
 				 &driver_attr_recording_status);
-	if (ret) {
-		printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
+	if (ret)
 		goto out_driver;
-	}
 
 	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
 	if (IS_ERR(vmlogrdr_class)) {
-		printk(KERN_ERR "vmlogrdr: failed to create class.\n");
 		ret = PTR_ERR(vmlogrdr_class);
 		vmlogrdr_class = NULL;
 		goto out_attr;
@@ -871,12 +857,10 @@ static int __init vmlogrdr_init(void)
 	rc = vmlogrdr_register_cdev(dev);
 	if (rc)
 		goto cleanup;
-	printk (KERN_INFO "vmlogrdr: driver loaded\n");
 	return 0;
 
 cleanup:
 	vmlogrdr_cleanup();
-	printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
 	return rc;
 }
 
@@ -884,7 +868,6 @@ cleanup:
 static void __exit vmlogrdr_exit(void)
 {
 	vmlogrdr_cleanup();
-	printk (KERN_INFO "vmlogrdr: driver unloaded\n");
 	return;
 }
 
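
The deleted printks around the cpcmd() calls above only echoed the command and raw response at KERN_DEBUG. For reference, a hedged sketch of a cpcmd() caller that keeps the response for its own parsing instead of printing it (command string and buffer size are assumptions; the response is truncated to the buffer):

static int sketch_query(char *buf, size_t len)
{
	char response[128];

	cpcmd("QUERY RECORDING", response, sizeof(response), NULL);
	strlcpy(buf, response, len);	/* parse instead of printk'ing */
	return 0;
}
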
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 83ae9a852f00..b0ac44b27127 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/cdev.h>
+#include <linux/smp_lock.h>
 
 #include <asm/uaccess.h>
 #include <asm/cio.h>
@@ -277,7 +278,8 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	struct urdev *urd;
 
 	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
-	      intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);
+	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+	      irb->scsw.cmd.count);
 
 	if (!intparm) {
 		TRACE("ur_int_handler: unsolicited interrupt\n");
@@ -288,7 +290,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	/* On special conditions irb is an error pointer */
 	if (IS_ERR(irb))
 		urd->io_request_rc = PTR_ERR(irb);
-	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
 		urd->io_request_rc = 0;
 	else
 		urd->io_request_rc = -EIO;
@@ -343,7 +345,7 @@ static int get_urd_class(struct urdev *urd)
 	cc = diag210(&ur_diag210);
 	switch (cc) {
 	case 0:
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	case 2:
 		return ur_diag210.vrdcvcla; /* virtual device class */
 	case 3:
@@ -619,7 +621,7 @@ static int verify_device(struct urdev *urd)
 	case DEV_CLASS_UR_I:
 		return verify_uri_device(urd);
 	default:
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 }
 
@@ -652,7 +654,7 @@ static int get_file_reclen(struct urdev *urd)
 	case DEV_CLASS_UR_I:
 		return get_uri_file_reclen(urd);
 	default:
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 }
 
@@ -668,7 +670,7 @@ static int ur_open(struct inode *inode, struct file *file)
 
 	if (accmode == O_RDWR)
 		return -EACCES;
-
+	lock_kernel();
 	/*
 	 * We treat the minor number as the devno of the ur device
 	 * to find in the driver tree.
@@ -676,8 +678,10 @@ static int ur_open(struct inode *inode, struct file *file)
 	devno = MINOR(file->f_dentry->d_inode->i_rdev);
 
 	urd = urdev_get_from_devno(devno);
-	if (!urd)
-		return -ENXIO;
+	if (!urd) {
+		rc = -ENXIO;
+		goto out;
+	}
 
 	spin_lock(&urd->open_lock);
 	while (urd->open_flag) {
@@ -720,6 +724,7 @@ static int ur_open(struct inode *inode, struct file *file)
 		goto fail_urfile_free;
 	urf->file_reclen = rc;
 	file->private_data = urf;
+	unlock_kernel();
 	return 0;
 
 fail_urfile_free:
@@ -730,6 +735,8 @@ fail_unlock:
 	spin_unlock(&urd->open_lock);
 fail_put:
 	urdev_put(urd);
+out:
+	unlock_kernel();
 	return rc;
 }
 
@@ -820,7 +827,7 @@ static int ur_probe(struct ccw_device *cdev)
 		goto fail_remove_attr;
 	}
 	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
-		rc = -ENOTSUPP;
+		rc = -EOPNOTSUPP;
 		goto fail_remove_attr;
 	}
 	spin_lock_irq(get_ccwdev_lock(cdev));
@@ -885,7 +892,7 @@ static int ur_set_online(struct ccw_device *cdev)
 	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
 		sprintf(node_id, "vmprt-%s", cdev->dev.bus_id);
 	} else {
-		rc = -ENOTSUPP;
+		rc = -EOPNOTSUPP;
 		goto fail_free_cdev;
 	}
 
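
The -ENOTSUPP to -EOPNOTSUPP conversions in vmur above matter because ENOTSUPP (524, from the kernel's linux/errno.h) is a kernel-internal code with no user-space errno definition, while EOPNOTSUPP (95 on Linux) is the standard "operation not supported" errno. A user-space illustration of what a vmur caller now sees (values are Linux-specific):

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* EOPNOTSUPP has a proper message; 524 would be "unknown error" */
	printf("%d: %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
	return 0;
}
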
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 19f8389291b6..21a2a829bf4e 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/watchdog.h>
+#include <linux/smp_lock.h>
 
 #include <asm/ebcdic.h>
 #include <asm/io.h>
@@ -92,23 +93,15 @@ static int vmwdt_keepalive(void)
 
 	func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
 	ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
+	WARN_ON(ret != 0);
 	kfree(ebc_cmd);
-
-	if (ret) {
-		printk(KERN_WARNING "%s: problem setting interval %d, "
-			"cmd %s\n", __func__, vmwdt_interval,
-			vmwdt_cmd);
-	}
 	return ret;
 }
 
 static int vmwdt_disable(void)
 {
 	int ret = __diag288(wdt_cancel, 0, "", 0);
-	if (ret) {
-		printk(KERN_WARNING "%s: problem disabling watchdog\n",
-			__func__);
-	}
+	WARN_ON(ret != 0);
 	return ret;
 }
 
@@ -121,21 +114,23 @@ static int __init vmwdt_probe(void)
 	static char __initdata ebc_begin[] = {
 		194, 197, 199, 201, 213
 	};
-	if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) {
-		printk(KERN_INFO "z/VM watchdog not available\n");
+	if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
 		return -EINVAL;
-	}
 	return vmwdt_disable();
 }
 
 static int vmwdt_open(struct inode *i, struct file *f)
 {
 	int ret;
-	if (test_and_set_bit(0, &vmwdt_is_open))
+	lock_kernel();
+	if (test_and_set_bit(0, &vmwdt_is_open)) {
+		unlock_kernel();
 		return -EBUSY;
+	}
 	ret = vmwdt_keepalive();
 	if (ret)
 		clear_bit(0, &vmwdt_is_open);
+	unlock_kernel();
 	return ret ? ret : nonseekable_open(i, f);
 }
 
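
In vmwatchdog above the multi-line printk branches collapse into WARN_ON(ret != 0): a diag288 failure is a genuine "should not happen", so a one-line warning with file, line and backtrace is more useful than a custom message. A sketch of the idiom, with do_hardware_call() as a hypothetical stand-in:

static int sketch_disable(void)
{
	int ret = do_hardware_call();	/* hypothetical, 0 on success */

	WARN_ON(ret != 0);	/* logs file:line plus a backtrace */
	return ret;		/* caller still sees the real rc */
}
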
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index bbbd14e9d48f..7fd84be11931 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -29,6 +29,7 @@
 
 #define TO_USER		0
 #define TO_KERNEL	1
+#define CHUNK_INFO_SIZE	34 /* 2 16-byte char, each followed by blank */
 
 enum arch_id {
 	ARCH_S390	= 0,
@@ -51,6 +52,7 @@ static struct debug_info *zcore_dbf;
 static int hsa_available;
 static struct dentry *zcore_dir;
 static struct dentry *zcore_file;
+static struct dentry *zcore_memmap_file;
 
 /*
  * Copy memory from HSA to kernel or user memory (not reentrant):
@@ -223,12 +225,10 @@ static int __init init_cpu_info(enum arch_id arch)
 	/* get info for boot cpu from lowcore, stored in the HSA */
 
 	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
-	if (!sa) {
-		ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
+	if (!sa)
 		return -ENOMEM;
-	}
 	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
-		ERROR_MSG("could not copy from HSA\n");
+		TRACE("could not copy from HSA\n");
 		kfree(sa);
 		return -EIO;
 	}
@@ -478,6 +478,54 @@ static const struct file_operations zcore_fops = {
 	.release	= zcore_release,
 };
 
+static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
+				       MEMORY_CHUNKS * CHUNK_INFO_SIZE);
+}
+
+static int zcore_memmap_open(struct inode *inode, struct file *filp)
+{
+	int i;
+	char *buf;
+	struct mem_chunk *chunk_array;
+
+	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
+			      GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
+	detect_memory_layout(chunk_array);
+	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
+	if (!buf) {
+		kfree(chunk_array);
+		return -ENOMEM;
+	}
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
+			(unsigned long long) chunk_array[i].addr,
+			(unsigned long long) chunk_array[i].size);
+		if (chunk_array[i].size == 0)
+			break;
+	}
+	kfree(chunk_array);
+	filp->private_data = buf;
+	return 0;
+}
+
+static int zcore_memmap_release(struct inode *inode, struct file *filp)
+{
+	kfree(filp->private_data);
+	return 0;
+}
+
+static const struct file_operations zcore_memmap_fops = {
+	.owner		= THIS_MODULE,
+	.read		= zcore_memmap_read,
+	.open		= zcore_memmap_open,
+	.release	= zcore_memmap_release,
+};
+
 
 static void __init set_s390_lc_mask(union save_area *map)
 {
@@ -511,6 +559,8 @@ static void __init set_s390x_lc_mask(union save_area *map)
  */
 static int __init sys_info_init(enum arch_id arch)
 {
+	int rc;
+
 	switch (arch) {
 	case ARCH_S390X:
 		MSG("DETECTED 'S390X (64 bit) OS'\n");
@@ -529,10 +579,9 @@ static int __init sys_info_init(enum arch_id arch)
 		return -EINVAL;
 	}
 	sys_info.arch = arch;
-	if (init_cpu_info(arch)) {
-		ERROR_MSG("get cpu info failed\n");
-		return -ENOMEM;
-	}
+	rc = init_cpu_info(arch);
+	if (rc)
+		return rc;
 	sys_info.mem_size = real_memory_size;
 
 	return 0;
@@ -544,29 +593,55 @@ static int __init check_sdias(void)
 
 	rc = sclp_sdias_blk_count();
 	if (rc < 0) {
-		ERROR_MSG("Could not determine HSA size\n");
+		TRACE("Could not determine HSA size\n");
		return rc;
 	}
 	act_hsa_size = (rc - 1) * PAGE_SIZE;
 	if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
-		ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
+		TRACE("HSA size too small: %i\n", act_hsa_size);
 		return -EINVAL;
 	}
 	return 0;
 }
 
-static void __init zcore_header_init(int arch, struct zcore_header *hdr)
+static int __init get_mem_size(unsigned long *mem)
 {
+	int i;
+	struct mem_chunk *chunk_array;
+
+	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
+			      GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
+	detect_memory_layout(chunk_array);
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		if (chunk_array[i].size == 0)
+			break;
+		*mem += chunk_array[i].size;
+	}
+	kfree(chunk_array);
+	return 0;
+}
+
+static int __init zcore_header_init(int arch, struct zcore_header *hdr)
+{
+	int rc;
+	unsigned long memory = 0;
+
 	if (arch == ARCH_S390X)
 		hdr->arch_id = DUMP_ARCH_S390X;
 	else
 		hdr->arch_id = DUMP_ARCH_S390;
-	hdr->mem_size = sys_info.mem_size;
-	hdr->rmem_size = sys_info.mem_size;
+	rc = get_mem_size(&memory);
+	if (rc)
+		return rc;
+	hdr->mem_size = memory;
+	hdr->rmem_size = memory;
 	hdr->mem_end = sys_info.mem_size;
-	hdr->num_pages = sys_info.mem_size / PAGE_SIZE;
+	hdr->num_pages = memory / PAGE_SIZE;
 	hdr->tod = get_clock();
 	get_cpu_id(&hdr->cpu_id);
+	return 0;
 }
 
 static int __init zcore_init(void)
@@ -590,16 +665,12 @@ static int __init zcore_init(void)
 		goto fail;
 
 	rc = check_sdias();
-	if (rc) {
-		ERROR_MSG("Dump initialization failed\n");
+	if (rc)
 		goto fail;
-	}
 
 	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
-	if (rc) {
-		ERROR_MSG("sdial memcpy for arch id failed\n");
+	if (rc)
 		goto fail;
-	}
 
 #ifndef __s390x__
 	if (arch == ARCH_S390X) {
@@ -610,12 +681,12 @@ static int __init zcore_init(void)
 #endif
 
 	rc = sys_info_init(arch);
-	if (rc) {
-		ERROR_MSG("arch init failed\n");
+	if (rc)
 		goto fail;
-	}
 
-	zcore_header_init(arch, &zcore_header);
+	rc = zcore_header_init(arch, &zcore_header);
+	if (rc)
+		goto fail;
 
 	zcore_dir = debugfs_create_dir("zcore" , NULL);
 	if (!zcore_dir) {
@@ -625,13 +696,22 @@ static int __init zcore_init(void)
 	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
 					 &zcore_fops);
 	if (!zcore_file) {
-		debugfs_remove(zcore_dir);
 		rc = -ENOMEM;
-		goto fail;
+		goto fail_dir;
+	}
+	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
+						NULL, &zcore_memmap_fops);
+	if (!zcore_memmap_file) {
+		rc = -ENOMEM;
+		goto fail_file;
 	}
 	hsa_available = 1;
 	return 0;
 
+fail_file:
+	debugfs_remove(zcore_file);
+fail_dir:
+	debugfs_remove(zcore_dir);
 fail:
 	diag308(DIAG308_REL_HSA, NULL);
 	return rc;
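
The new debugfs file created above, /sys/kernel/debug/zcore/memmap, emits one fixed-width record per memory chunk: two 16-digit hex numbers (start address and size), each followed by a blank, CHUNK_INFO_SIZE (34) bytes per record, with a chunk of size 0 terminating the list. A hypothetical user-space reader:

#include <stdio.h>

int main(void)
{
	unsigned long long addr, size;
	FILE *f = fopen("/sys/kernel/debug/zcore/memmap", "r");

	if (!f)
		return 1;
	/* records stop matching at the zero-sized terminator chunk */
	while (fscanf(f, "%16llx %16llx", &addr, &size) == 2 && size)
		printf("chunk: addr=0x%016llx size=%llu\n", addr, size);
	fclose(f);
	return 0;
}
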
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index cfaf77b320f5..bd79bd165396 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,9 +2,13 @@
 # Makefile for the S/390 common i/o drivers
 #
 
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
+	fcx.o itcw.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
 obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
 obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index b7a07a866291..fe6cea15bbaf 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 
 #include <asm/airq.h>
+#include <asm/isc.h>
 
 #include "cio.h"
 #include "cio_debug.h"
@@ -33,15 +34,15 @@ struct airq_t {
 	void *drv_data;
 };
 
-static union indicator_t indicators;
-static struct airq_t *airqs[NR_AIRQS];
+static union indicator_t indicators[MAX_ISC];
+static struct airq_t *airqs[MAX_ISC][NR_AIRQS];
 
-static int register_airq(struct airq_t *airq)
+static int register_airq(struct airq_t *airq, u8 isc)
 {
 	int i;
 
 	for (i = 0; i < NR_AIRQS; i++)
-		if (!cmpxchg(&airqs[i], NULL, airq))
+		if (!cmpxchg(&airqs[isc][i], NULL, airq))
 			return i;
 	return -ENOMEM;
 }
@@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq)
  * s390_register_adapter_interrupt() - register adapter interrupt handler
  * @handler: adapter handler to be registered
  * @drv_data: driver data passed with each call to the handler
+ * @isc: isc for which the handler should be called
  *
  * Returns:
  * Pointer to the indicator to be used on success
  * ERR_PTR() if registration failed
  */
 void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
-				      void *drv_data)
+				      void *drv_data, u8 isc)
 {
 	struct airq_t *airq;
 	char dbf_txt[16];
 	int ret;
 
+	if (isc > MAX_ISC)
+		return ERR_PTR(-EINVAL);
 	airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
 	if (!airq) {
 		ret = -ENOMEM;
@@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
 	}
 	airq->handler = handler;
 	airq->drv_data = drv_data;
-	ret = register_airq(airq);
-	if (ret < 0)
-		kfree(airq);
+
+	ret = register_airq(airq, isc);
 out:
 	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(airq);
 		return ERR_PTR(ret);
-	else
-		return &indicators.byte[ret];
+	} else
+		return &indicators[isc].byte[ret];
 }
 EXPORT_SYMBOL(s390_register_adapter_interrupt);
 
 /**
  * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
  * @ind: indicator for which the handler is to be unregistered
+ * @isc: interruption subclass
  */
-void s390_unregister_adapter_interrupt(void *ind)
+void s390_unregister_adapter_interrupt(void *ind, u8 isc)
 {
 	struct airq_t *airq;
 	char dbf_txt[16];
 	int i;
 
-	i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]);
+	i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
 	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	indicators.byte[i] = 0;
-	airq = xchg(&airqs[i], NULL);
+	indicators[isc].byte[i] = 0;
+	airq = xchg(&airqs[isc][i], NULL);
 	/*
 	 * Allow interrupts to complete. This will ensure that the airq handle
 	 * is no longer referenced by any interrupt handler.
@@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
 
 #define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
 
-void do_adapter_IO(void)
+void do_adapter_IO(u8 isc)
 {
 	int w;
 	int i;
@@ -120,22 +125,22 @@ void do_adapter_IO(void)
 	 * fetch operations.
 	 */
 	for (w = 0; w < NR_AIRQ_WORDS; w++) {
-		word = indicators.word[w];
+		word = indicators[isc].word[w];
 		i = w * NR_AIRQS_PER_WORD;
 		/*
 		 * Check bytes within word for active indicators.
 		 */
 		while (word) {
 			if (word & INDICATOR_MASK) {
-				airq = airqs[i];
+				airq = airqs[isc][i];
 				if (likely(airq))
-					airq->handler(&indicators.byte[i],
+					airq->handler(&indicators[isc].byte[i],
 						      airq->drv_data);
 				else
 					/*
 					 * Reset ill-behaved indicator.
 					 */
-					indicators.byte[i] = 0;
+					indicators[isc].byte[i] = 0;
 			}
 			word <<= 8;
 			i++;
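
With the airq changes above, adapter interrupt indicators are kept per interruption subclass, and both registration calls take the ISC explicitly. A sketch of a driver using the extended interface; QDIO_AIRQ_ISC stands in for whatever ISC the caller owns:

static void sketch_airq_handler(void *indicator, void *drv_data)
{
	/* called from do_adapter_IO() for our ISC: clear and reschedule */
}

static void *sketch_indicator;

static int sketch_register(void)
{
	sketch_indicator = s390_register_adapter_interrupt(
				sketch_airq_handler, NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(sketch_indicator))
		return PTR_ERR(sketch_indicator);
	return 0;
}

static void sketch_unregister(void)
{
	s390_unregister_adapter_interrupt(sketch_indicator, QDIO_AIRQ_ISC);
}
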
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 297cdceb0ca4..db00b0591733 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -18,6 +18,7 @@
 #include <asm/chpid.h>
 #include <asm/sclp.h>
 
+#include "../s390mach.h"
 #include "cio.h"
 #include "css.h"
 #include "ioasm.h"
@@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
 	}
 	return opm;
 }
+EXPORT_SYMBOL_GPL(chp_get_sch_opm);
 
 /**
  * chp_is_registered - check if a channel-path is registered
@@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
 	CIO_TRACE_EVENT(2, dbf_text);
 
 	status = chp_get_status(chpid);
-	if (!on && !status) {
-		printk(KERN_ERR "cio: chpid %x.%02x is already offline\n",
-		       chpid.cssid, chpid.id);
-		return -EINVAL;
-	}
+	if (!on && !status)
+		return 0;
 
 	set_chp_logically_online(chpid, on);
 	chsc_chp_vary(chpid, on);
@@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
 {
 	struct channel_path *chp;
 	struct device *device;
-	unsigned int size;
 
 	device = container_of(kobj, struct device, kobj);
 	chp = to_channelpath(device);
 	if (!chp->cmg_chars)
 		return 0;
 
-	size = sizeof(struct cmg_chars);
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, chp->cmg_chars + off, count);
-	return count;
+	return memory_read_from_buffer(buf, count, &off,
+				chp->cmg_chars, sizeof(struct cmg_chars));
 }
 
 static struct bin_attribute chp_measurement_chars_attr = {
@@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid)
 		chpid.id);
 
 	/* Obtain channel path description and fill it in. */
-	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
+	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
 	if (ret)
 		goto out_free;
 	if ((chp->desc.flags & 0x80) == 0) {
@@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid)
 		goto out_free;
 	}
 	/* Get channel-measurement characteristics. */
-	if (css_characteristics_avail && css_chsc_characteristics.scmc
-	    && css_chsc_characteristics.secm) {
+	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
 		ret = chsc_get_channel_measurement_chars(chp);
 		if (ret)
 			goto out_free;
@@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid)
 
 /**
  * chp_process_crw - process channel-path status change
- * @id: channel-path ID number
- * @status: non-zero if channel-path has become available, zero otherwise
+ * @crw0: channel report-word to handler
+ * @crw1: second channel-report word (always NULL)
+ * @overflow: crw overflow indication
  *
  * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
-void chp_process_crw(int id, int status)
+static void chp_process_crw(struct crw *crw0, struct crw *crw1,
+			    int overflow)
 {
 	struct chp_id chpid;
 
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	/*
+	 * Check for solicited machine checks. These are
+	 * created by reset channel path and need not be
+	 * handled here.
+	 */
+	if (crw0->slct) {
+		CIO_CRW_EVENT(2, "solicited machine check for "
+			      "channel path %02X\n", crw0->rsid);
+		return;
+	}
 	chp_id_init(&chpid);
-	chpid.id = id;
-	if (status) {
+	chpid.id = crw0->rsid;
+	switch (crw0->erc) {
+	case CRW_ERC_IPARM: /* Path has come. */
 		if (!chp_is_registered(chpid))
 			chp_new(chpid);
 		chsc_chp_online(chpid);
-	} else
+		break;
+	case CRW_ERC_PERRI: /* Path has gone. */
+	case CRW_ERC_PERRN:
 		chsc_chp_offline(chpid);
+		break;
+	default:
+		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
+			      crw0->erc);
+	}
 }
 
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (!(ssd->path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
+			continue;
+		if ((ssd->fla_valid_mask & mask) &&
+		    ((ssd->fla[i] & link->fla_mask) != link->fla))
+			continue;
+		return mask;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
+
 static inline int info_bit_num(struct chp_id id)
 {
 	return id.id + id.cssid * (__MAX_CHPID + 1);
@@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work)
 {
 	struct chp_id chpid;
 	enum cfg_task_t t;
+	int rc;
 
 	mutex_lock(&cfg_lock);
 	t = cfg_none;
@@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work)
 
 	switch (t) {
 	case cfg_configure:
-		sclp_chp_configure(chpid);
-		info_expire();
-		chsc_chp_online(chpid);
+		rc = sclp_chp_configure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_online(chpid);
+		}
 		break;
 	case cfg_deconfigure:
-		sclp_chp_deconfigure(chpid);
-		info_expire();
-		chsc_chp_offline(chpid);
+		rc = sclp_chp_deconfigure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_offline(chpid);
+		}
 		break;
 	case cfg_none:
 		/* Get updated information after last change. */
@@ -654,10 +704,16 @@ static int cfg_wait_idle(void)
 static int __init chp_init(void)
 {
 	struct chp_id chpid;
+	int ret;
 
+	ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw);
+	if (ret)
+		return ret;
 	chp_wq = create_singlethread_workqueue("cio_chp");
-	if (!chp_wq)
+	if (!chp_wq) {
+		s390_unregister_crw_handler(CRW_RSC_CPATH);
 		return -ENOMEM;
+	}
 	INIT_WORK(&cfg_work, cfg_func);
 	init_waitqueue_head(&cfg_wait_queue);
 	if (info_update())
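
chp_process_crw() above changes from a css-called helper into a self-registered handler for the channel-path reporting source (CRW_RSC_CPATH), with the overflow case folded in. A sketch of the handler contract that s390_register_crw_handler() is assumed to expect:

static void sketch_crw_handler(struct crw *crw0, struct crw *crw1,
			       int overflow)
{
	if (overflow) {
		/* reports were lost: schedule a full re-evaluation */
		return;
	}
	/* otherwise inspect crw0->rsc, crw0->erc and crw0->rsid */
}

Registration then amounts to s390_register_crw_handler(rsc, sketch_crw_handler) for the caller's own reporting-source code, undone with s390_unregister_crw_handler() on the error path, exactly as chp_init() now does.
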
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 65286563c592..26c3d2246176 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -12,12 +12,24 @@
 #include <linux/device.h>
 #include <asm/chpid.h>
 #include "chsc.h"
+#include "css.h"
 
 #define CHP_STATUS_STANDBY		0
 #define CHP_STATUS_CONFIGURED		1
 #define CHP_STATUS_RESERVED		2
 #define CHP_STATUS_NOT_RECOGNIZED	3
 
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct chp_link {
+	struct chp_id chpid;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static inline int chp_test_bit(u8 *bitmap, int num)
 {
 	int byte = num >> 3;
@@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
 void *chp_get_chp_desc(struct chp_id chpid);
-void chp_process_crw(int id, int available);
 void chp_remove_cmg_attr(struct channel_path *chp);
 int chp_add_cmg_attr(struct channel_path *chp);
 int chp_new(struct chp_id chpid);
 void chp_cfg_schedule(struct chp_id chpid, int configure);
 void chp_cfg_cancel_deconfigure(struct chp_id chpid);
 int chp_info_get_status(struct chp_id chpid);
-
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
 #endif /* S390_CHP_H */
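
struct chp_link, added above, replaces the chsc-private res_acc_data so that chsc.c can hand a generic "channel path plus optional link address" descriptor to subchannel drivers. A hypothetical caller matching a subchannel's paths against such a link:

static int sketch_path_mask(struct subchannel *sch, struct chp_id chpid)
{
	struct chp_link link = {
		.chpid	  = chpid,
		.fla	  = 0,	/* 0/0: do not filter on link address */
		.fla_mask = 0,
	};

	/* returns the bit in the path mask if connected, 0 otherwise */
	return chp_ssd_get_mask(&sch->ssd_info, &link);
}
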
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 5de86908b0d0..29826fdd47b8 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/chsc.c 2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call 3 * S/390 common I/O routines -- channel subsystem call
4 * 4 *
5 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 1999,2008
6 * IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -16,7 +15,9 @@
16 15
17#include <asm/cio.h> 16#include <asm/cio.h>
18#include <asm/chpid.h> 17#include <asm/chpid.h>
18#include <asm/chsc.h>
19 19
20#include "../s390mach.h"
20#include "css.h" 21#include "css.h"
21#include "cio.h" 22#include "cio.h"
22#include "cio_debug.h" 23#include "cio_debug.h"
@@ -26,7 +27,13 @@
26 27
27static void *sei_page; 28static void *sei_page;
28 29
29static int chsc_error_from_response(int response) 30/**
31 * chsc_error_from_response() - convert a chsc response to an error
32 * @response: chsc response code
33 *
34 * Returns an appropriate Linux error code for @response.
35 */
36int chsc_error_from_response(int response)
30{ 37{
31 switch (response) { 38 switch (response) {
32 case 0x0001: 39 case 0x0001:
@@ -44,6 +51,7 @@ static int chsc_error_from_response(int response)
44 return -EIO; 51 return -EIO;
45 } 52 }
46} 53}
54EXPORT_SYMBOL_GPL(chsc_error_from_response);
47 55
48struct chsc_ssd_area { 56struct chsc_ssd_area {
49 struct chsc_header request; 57 struct chsc_header request;
@@ -127,77 +135,12 @@ out_free:
127 return ret; 135 return ret;
128} 136}
129 137
130static int check_for_io_on_path(struct subchannel *sch, int mask)
131{
132 int cc;
133
134 cc = stsch(sch->schid, &sch->schib);
135 if (cc)
136 return 0;
137 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
138 return 1;
139 return 0;
140}
141
142static void terminate_internal_io(struct subchannel *sch)
143{
144 if (cio_clear(sch)) {
145 /* Recheck device in case clear failed. */
146 sch->lpm = 0;
147 if (device_trigger_verify(sch) != 0)
148 css_schedule_eval(sch->schid);
149 return;
150 }
151 /* Request retry of internal operation. */
152 device_set_intretry(sch);
153 /* Call handler. */
154 if (sch->driver && sch->driver->termination)
155 sch->driver->termination(sch);
156}
157
158static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) 138static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
159{ 139{
160 int j;
161 int mask;
162 struct chp_id *chpid = data;
163 struct schib schib;
164
165 for (j = 0; j < 8; j++) {
166 mask = 0x80 >> j;
167 if ((sch->schib.pmcw.pim & mask) &&
168 (sch->schib.pmcw.chpid[j] == chpid->id))
169 break;
170 }
171 if (j >= 8)
172 return 0;
173
174 spin_lock_irq(sch->lock); 140 spin_lock_irq(sch->lock);
175 141 if (sch->driver && sch->driver->chp_event)
176 stsch(sch->schid, &schib); 142 if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
177 if (!css_sch_is_valid(&schib))
178 goto out_unreg;
179 memcpy(&sch->schib, &schib, sizeof(struct schib));
180 /* Check for single path devices. */
181 if (sch->schib.pmcw.pim == 0x80)
182 goto out_unreg;
183
184 if (check_for_io_on_path(sch, mask)) {
185 if (device_is_online(sch))
186 device_kill_io(sch);
187 else {
188 terminate_internal_io(sch);
189 /* Re-start path verification. */
190 if (sch->driver && sch->driver->verify)
191 sch->driver->verify(sch);
192 }
193 } else {
194 /* trigger path verification. */
195 if (sch->driver && sch->driver->verify)
196 sch->driver->verify(sch);
197 else if (sch->lpm == mask)
198 goto out_unreg; 143 goto out_unreg;
199 }
200
201 spin_unlock_irq(sch->lock); 144 spin_unlock_irq(sch->lock);
202 return 0; 145 return 0;
203 146
@@ -211,15 +154,18 @@ out_unreg:
211void chsc_chp_offline(struct chp_id chpid) 154void chsc_chp_offline(struct chp_id chpid)
212{ 155{
213 char dbf_txt[15]; 156 char dbf_txt[15];
157 struct chp_link link;
214 158
215 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); 159 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
216 CIO_TRACE_EVENT(2, dbf_txt); 160 CIO_TRACE_EVENT(2, dbf_txt);
217 161
218 if (chp_get_status(chpid) <= 0) 162 if (chp_get_status(chpid) <= 0)
219 return; 163 return;
164 memset(&link, 0, sizeof(struct chp_link));
165 link.chpid = chpid;
220 /* Wait until previous actions have settled. */ 166 /* Wait until previous actions have settled. */
221 css_wait_for_slow_path(); 167 css_wait_for_slow_path();
222 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); 168 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
223} 169}
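[editor's note: the per-CHPID bookkeeping removed above now lives behind the css_driver chp_event() callback. A minimal driver-side sketch, assuming only the CHP_OFFLINE/CHP_ONLINE event codes and the chp_link argument used in this patch; the handler name is illustrative, not taken from this series:]

	static int io_subchannel_chp_event(struct subchannel *sch,
					   struct chp_link *link, int event)
	{
		switch (event) {
		case CHP_OFFLINE:
			/* Stop using paths over link->chpid; kill pending I/O. */
			break;
		case CHP_ONLINE:
			/* Recompute sch->lpm and trigger path verification. */
			break;
		}
		return 0;
	}

	static struct css_driver io_subchannel_driver = {
		/* ... */
		.chp_event = io_subchannel_chp_event,
	};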
224 170
225static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) 171static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
@@ -242,67 +188,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
242 return 0; 188 return 0;
243} 189}
244 190
245struct res_acc_data {
246 struct chp_id chpid;
247 u32 fla_mask;
248 u16 fla;
249};
250
251static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
252 struct res_acc_data *data)
253{
254 int i;
255 int mask;
256
257 for (i = 0; i < 8; i++) {
258 mask = 0x80 >> i;
259 if (!(ssd->path_mask & mask))
260 continue;
261 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
262 continue;
263 if ((ssd->fla_valid_mask & mask) &&
264 ((ssd->fla[i] & data->fla_mask) != data->fla))
265 continue;
266 return mask;
267 }
268 return 0;
269}
270
271static int __s390_process_res_acc(struct subchannel *sch, void *data) 191static int __s390_process_res_acc(struct subchannel *sch, void *data)
272{ 192{
273 int chp_mask, old_lpm;
274 struct res_acc_data *res_data = data;
275
276 spin_lock_irq(sch->lock); 193 spin_lock_irq(sch->lock);
277 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); 194 if (sch->driver && sch->driver->chp_event)
278 if (chp_mask == 0) 195 sch->driver->chp_event(sch, data, CHP_ONLINE);
279 goto out;
280 if (stsch(sch->schid, &sch->schib))
281 goto out;
282 old_lpm = sch->lpm;
283 sch->lpm = ((sch->schib.pmcw.pim &
284 sch->schib.pmcw.pam &
285 sch->schib.pmcw.pom)
286 | chp_mask) & sch->opm;
287 if (!old_lpm && sch->lpm)
288 device_trigger_reprobe(sch);
289 else if (sch->driver && sch->driver->verify)
290 sch->driver->verify(sch);
291out:
292 spin_unlock_irq(sch->lock); 196 spin_unlock_irq(sch->lock);
293 197
294 return 0; 198 return 0;
295} 199}
296 200
297static void s390_process_res_acc (struct res_acc_data *res_data) 201static void s390_process_res_acc(struct chp_link *link)
298{ 202{
299 char dbf_txt[15]; 203 char dbf_txt[15];
300 204
301 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, 205 sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
302 res_data->chpid.id); 206 link->chpid.id);
303 CIO_TRACE_EVENT( 2, dbf_txt); 207 CIO_TRACE_EVENT( 2, dbf_txt);
304 if (res_data->fla != 0) { 208 if (link->fla != 0) {
305 sprintf(dbf_txt, "fla%x", res_data->fla); 209 sprintf(dbf_txt, "fla%x", link->fla);
306 CIO_TRACE_EVENT( 2, dbf_txt); 210 CIO_TRACE_EVENT( 2, dbf_txt);
307 } 211 }
308 /* Wait until previous actions have settled. */ 212 /* Wait until previous actions have settled. */
@@ -315,7 +219,7 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
315 * will we have to do. 219 * will we have to do.
316 */ 220 */
317 for_each_subchannel_staged(__s390_process_res_acc, 221 for_each_subchannel_staged(__s390_process_res_acc,
318 s390_process_res_acc_new_sch, res_data); 222 s390_process_res_acc_new_sch, link);
319} 223}
320 224
321static int 225static int
@@ -388,7 +292,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
388 292
389static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) 293static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
390{ 294{
391 struct res_acc_data res_data; 295 struct chp_link link;
392 struct chp_id chpid; 296 struct chp_id chpid;
393 int status; 297 int status;
394 298
@@ -404,18 +308,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
404 chp_new(chpid); 308 chp_new(chpid);
405 else if (!status) 309 else if (!status)
406 return; 310 return;
407 memset(&res_data, 0, sizeof(struct res_acc_data)); 311 memset(&link, 0, sizeof(struct chp_link));
408 res_data.chpid = chpid; 312 link.chpid = chpid;
409 if ((sei_area->vf & 0xc0) != 0) { 313 if ((sei_area->vf & 0xc0) != 0) {
410 res_data.fla = sei_area->fla; 314 link.fla = sei_area->fla;
411 if ((sei_area->vf & 0xc0) == 0xc0) 315 if ((sei_area->vf & 0xc0) == 0xc0)
412 /* full link address */ 316 /* full link address */
413 res_data.fla_mask = 0xffff; 317 link.fla_mask = 0xffff;
414 else 318 else
415 /* link address */ 319 /* link address */
416 res_data.fla_mask = 0xff00; 320 link.fla_mask = 0xff00;
417 } 321 }
418 s390_process_res_acc(&res_data); 322 s390_process_res_acc(&link);
419} 323}
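[editor's note: the vf bits state how much of the full link address in the event is significant; fla_mask then narrows the comparison when subchannels are matched. Illustrative check, following the removed get_res_chpid_mask() logic but with the chp_link field names:]

	/* vf == 0xc0: full link address, compare all 16 bits;
	 * vf == 0x80: link address only, compare the high byte. */
	if ((ssd->fla[i] & link->fla_mask) != link->fla)
		continue;	/* this path is not affected by the event */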
420 324
421struct chp_config_data { 325struct chp_config_data {
@@ -480,17 +384,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
480 } 384 }
481} 385}
482 386
483void chsc_process_crw(void) 387static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
484{ 388{
485 struct chsc_sei_area *sei_area; 389 struct chsc_sei_area *sei_area;
486 390
391 if (overflow) {
392 css_schedule_eval_all();
393 return;
394 }
395 CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
396 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
397 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
398 crw0->erc, crw0->rsid);
487 if (!sei_page) 399 if (!sei_page)
488 return; 400 return;
489 /* Access to sei_page is serialized through machine check handler 401 /* Access to sei_page is serialized through machine check handler
490 * thread, so no need for locking. */ 402 * thread, so no need for locking. */
491 sei_area = sei_page; 403 sei_area = sei_page;
492 404
493 CIO_TRACE_EVENT( 2, "prcss"); 405 CIO_TRACE_EVENT(2, "prcss");
494 do { 406 do {
495 memset(sei_area, 0, sizeof(*sei_area)); 407 memset(sei_area, 0, sizeof(*sei_area));
496 sei_area->request.length = 0x0010; 408 sei_area->request.length = 0x0010;
@@ -509,114 +421,36 @@ void chsc_process_crw(void)
509 } while (sei_area->flags & 0x80); 421 } while (sei_area->flags & 0x80);
510} 422}
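[editor's note: chsc_process_crw() is no longer called directly by the machine check handler; it becomes a per-resource-source callback, registered and torn down exactly as shown in chsc_alloc_sei_area()/chsc_free_sei_area() further down:]

	/* Registration, as done in chsc_alloc_sei_area() below: */
	ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		kfree(sei_page);
	/* Teardown, as done in chsc_free_sei_area(): */
	s390_unregister_crw_handler(CRW_RSC_CSS);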
511 423
512static int __chp_add_new_sch(struct subchannel_id schid, void *data)
513{
514 struct schib schib;
515
516 if (stsch_err(schid, &schib))
517 /* We're through */
518 return -ENXIO;
519
520 /* Put it on the slow path. */
521 css_schedule_eval(schid);
522 return 0;
523}
524
525
526static int __chp_add(struct subchannel *sch, void *data)
527{
528 int i, mask;
529 struct chp_id *chpid = data;
530
531 spin_lock_irq(sch->lock);
532 for (i=0; i<8; i++) {
533 mask = 0x80 >> i;
534 if ((sch->schib.pmcw.pim & mask) &&
535 (sch->schib.pmcw.chpid[i] == chpid->id))
536 break;
537 }
538 if (i==8) {
539 spin_unlock_irq(sch->lock);
540 return 0;
541 }
542 if (stsch(sch->schid, &sch->schib)) {
543 spin_unlock_irq(sch->lock);
544 css_schedule_eval(sch->schid);
545 return 0;
546 }
547 sch->lpm = ((sch->schib.pmcw.pim &
548 sch->schib.pmcw.pam &
549 sch->schib.pmcw.pom)
550 | mask) & sch->opm;
551
552 if (sch->driver && sch->driver->verify)
553 sch->driver->verify(sch);
554
555 spin_unlock_irq(sch->lock);
556
557 return 0;
558}
559
560void chsc_chp_online(struct chp_id chpid) 424void chsc_chp_online(struct chp_id chpid)
561{ 425{
562 char dbf_txt[15]; 426 char dbf_txt[15];
427 struct chp_link link;
563 428
564 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); 429 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
565 CIO_TRACE_EVENT(2, dbf_txt); 430 CIO_TRACE_EVENT(2, dbf_txt);
566 431
567 if (chp_get_status(chpid) != 0) { 432 if (chp_get_status(chpid) != 0) {
433 memset(&link, 0, sizeof(struct chp_link));
434 link.chpid = chpid;
568 /* Wait until previous actions have settled. */ 435 /* Wait until previous actions have settled. */
569 css_wait_for_slow_path(); 436 css_wait_for_slow_path();
570 for_each_subchannel_staged(__chp_add, __chp_add_new_sch, 437 for_each_subchannel_staged(__s390_process_res_acc, NULL,
571 &chpid); 438 &link);
572 } 439 }
573} 440}
574 441
575static void __s390_subchannel_vary_chpid(struct subchannel *sch, 442static void __s390_subchannel_vary_chpid(struct subchannel *sch,
576 struct chp_id chpid, int on) 443 struct chp_id chpid, int on)
577{ 444{
578 int chp, old_lpm;
579 int mask;
580 unsigned long flags; 445 unsigned long flags;
446 struct chp_link link;
581 447
448 memset(&link, 0, sizeof(struct chp_link));
449 link.chpid = chpid;
582 spin_lock_irqsave(sch->lock, flags); 450 spin_lock_irqsave(sch->lock, flags);
583 old_lpm = sch->lpm; 451 if (sch->driver && sch->driver->chp_event)
584 for (chp = 0; chp < 8; chp++) { 452 sch->driver->chp_event(sch, &link,
585 mask = 0x80 >> chp; 453 on ? CHP_VARY_ON : CHP_VARY_OFF);
586 if (!(sch->ssd_info.path_mask & mask))
587 continue;
588 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
589 continue;
590
591 if (on) {
592 sch->opm |= mask;
593 sch->lpm |= mask;
594 if (!old_lpm)
595 device_trigger_reprobe(sch);
596 else if (sch->driver && sch->driver->verify)
597 sch->driver->verify(sch);
598 break;
599 }
600 sch->opm &= ~mask;
601 sch->lpm &= ~mask;
602 if (check_for_io_on_path(sch, mask)) {
603 if (device_is_online(sch))
604 /* Path verification is done after killing. */
605 device_kill_io(sch);
606 else {
607 /* Kill and retry internal I/O. */
608 terminate_internal_io(sch);
609 /* Re-start path verification. */
610 if (sch->driver && sch->driver->verify)
611 sch->driver->verify(sch);
612 }
613 } else if (!sch->lpm) {
614 if (device_trigger_verify(sch) != 0)
615 css_schedule_eval(sch->schid);
616 } else if (sch->driver && sch->driver->verify)
617 sch->driver->verify(sch);
618 break;
619 }
620 spin_unlock_irqrestore(sch->lock, flags); 454 spin_unlock_irqrestore(sch->lock, flags);
621} 455}
622 456
@@ -656,6 +490,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
656 */ 490 */
657int chsc_chp_vary(struct chp_id chpid, int on) 491int chsc_chp_vary(struct chp_id chpid, int on)
658{ 492{
493 struct chp_link link;
494
495 memset(&link, 0, sizeof(struct chp_link));
496 link.chpid = chpid;
659 /* Wait until previous actions have settled. */ 497 /* Wait until previous actions have settled. */
660 css_wait_for_slow_path(); 498 css_wait_for_slow_path();
661 /* 499 /*
@@ -664,10 +502,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
664 502
665 if (on) 503 if (on)
666 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 504 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
667 __s390_vary_chpid_on, &chpid); 505 __s390_vary_chpid_on, &link);
668 else 506 else
669 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 507 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
670 NULL, &chpid); 508 NULL, &link);
671 509
672 return 0; 510 return 0;
673} 511}
@@ -797,23 +635,33 @@ chsc_secm(struct channel_subsystem *css, int enable)
797 return ret; 635 return ret;
798} 636}
799 637
800int chsc_determine_channel_path_description(struct chp_id chpid, 638int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
801 struct channel_path_desc *desc) 639 int c, int m,
640 struct chsc_response_struct *resp)
802{ 641{
803 int ccode, ret; 642 int ccode, ret;
804 643
805 struct { 644 struct {
806 struct chsc_header request; 645 struct chsc_header request;
807 u32 : 24; 646 u32 : 2;
647 u32 m : 1;
648 u32 c : 1;
649 u32 fmt : 4;
650 u32 cssid : 8;
651 u32 : 4;
652 u32 rfmt : 4;
808 u32 first_chpid : 8; 653 u32 first_chpid : 8;
809 u32 : 24; 654 u32 : 24;
810 u32 last_chpid : 8; 655 u32 last_chpid : 8;
811 u32 zeroes1; 656 u32 zeroes1;
812 struct chsc_header response; 657 struct chsc_header response;
813 u32 zeroes2; 658 u8 data[PAGE_SIZE - 20];
814 struct channel_path_desc desc;
815 } __attribute__ ((packed)) *scpd_area; 659 } __attribute__ ((packed)) *scpd_area;
816 660
661 if ((rfmt == 1) && !css_general_characteristics.fcs)
662 return -EINVAL;
663 if ((rfmt == 2) && !css_general_characteristics.cib)
664 return -EINVAL;
817 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 665 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
818 if (!scpd_area) 666 if (!scpd_area)
819 return -ENOMEM; 667 return -ENOMEM;
@@ -821,8 +669,13 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
821 scpd_area->request.length = 0x0010; 669 scpd_area->request.length = 0x0010;
822 scpd_area->request.code = 0x0002; 670 scpd_area->request.code = 0x0002;
823 671
672 scpd_area->cssid = chpid.cssid;
824 scpd_area->first_chpid = chpid.id; 673 scpd_area->first_chpid = chpid.id;
825 scpd_area->last_chpid = chpid.id; 674 scpd_area->last_chpid = chpid.id;
675 scpd_area->m = m;
676 scpd_area->c = c;
677 scpd_area->fmt = fmt;
678 scpd_area->rfmt = rfmt;
826 679
827 ccode = chsc(scpd_area); 680 ccode = chsc(scpd_area);
828 if (ccode > 0) { 681 if (ccode > 0) {
@@ -833,8 +686,7 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
833 ret = chsc_error_from_response(scpd_area->response.code); 686 ret = chsc_error_from_response(scpd_area->response.code);
834 if (ret == 0) 687 if (ret == 0)
835 /* Success. */ 688 /* Success. */
836 memcpy(desc, &scpd_area->desc, 689 memcpy(resp, &scpd_area->response, scpd_area->response.length);
837 sizeof(struct channel_path_desc));
838 else 690 else
839 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", 691 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
840 scpd_area->response.code); 692 scpd_area->response.code);
@@ -842,6 +694,25 @@ out:
842 free_page((unsigned long)scpd_area); 694 free_page((unsigned long)scpd_area);
843 return ret; 695 return ret;
844} 696}
697EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
698
699int chsc_determine_base_channel_path_desc(struct chp_id chpid,
700 struct channel_path_desc *desc)
701{
702 struct chsc_response_struct *chsc_resp;
703 int ret;
704
705 chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
706 if (!chsc_resp)
707 return -ENOMEM;
708 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
709 if (ret)
710 goto out_free;
711 memcpy(desc, &chsc_resp->data, chsc_resp->length);
712out_free:
713 kfree(chsc_resp);
714 return ret;
715}
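[editor's note: the base wrapper fetches the plain format-0 description; callers needing an extended format pass fmt/rfmt/c/m themselves. A hedged sketch of such a caller — rfmt == 1 is only valid when the fcs facility checked above is present:]

	struct chsc_response_struct *resp;
	int ret;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;
	ret = chsc_determine_channel_path_desc(chpid, 0, 1, 0, 0, resp);
	if (ret == 0) {
		/* resp->data now holds the format-1 description,
		 * resp->length its size. */
	}
	kfree(resp);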
845 716
846static void 717static void
847chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, 718chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
@@ -937,15 +808,23 @@ out:
937 808
938int __init chsc_alloc_sei_area(void) 809int __init chsc_alloc_sei_area(void)
939{ 810{
811 int ret;
812
940 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 813 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
941 if (!sei_page) 814 if (!sei_page) {
942 CIO_MSG_EVENT(0, "Can't allocate page for processing of " 815 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
943 "chsc machine checks!\n"); 816 "chsc machine checks!\n");
944 return (sei_page ? 0 : -ENOMEM); 817 return -ENOMEM;
818 }
819 ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
820 if (ret)
821 kfree(sei_page);
822 return ret;
945} 823}
946 824
947void __init chsc_free_sei_area(void) 825void __init chsc_free_sei_area(void)
948{ 826{
827 s390_unregister_crw_handler(CRW_RSC_CSS);
949 kfree(sei_page); 828 kfree(sei_page);
950} 829}
951 830
@@ -1043,3 +922,52 @@ exit:
1043 922
1044EXPORT_SYMBOL_GPL(css_general_characteristics); 923EXPORT_SYMBOL_GPL(css_general_characteristics);
1045EXPORT_SYMBOL_GPL(css_chsc_characteristics); 924EXPORT_SYMBOL_GPL(css_chsc_characteristics);
925
926int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
927{
928 struct {
929 struct chsc_header request;
930 unsigned int rsvd0;
931 unsigned int op : 8;
932 unsigned int rsvd1 : 8;
933 unsigned int ctrl : 16;
934 unsigned int rsvd2[5];
935 struct chsc_header response;
936 unsigned int rsvd3[7];
937 } __attribute__ ((packed)) *rr;
938 int rc;
939
940 memset(page, 0, PAGE_SIZE);
941 rr = page;
942 rr->request.length = 0x0020;
943 rr->request.code = 0x0033;
944 rr->op = op;
945 rr->ctrl = ctrl;
946 rc = chsc(rr);
947 if (rc)
948 return -EIO;
949 rc = (rr->response.code == 0x0001) ? 0 : -EIO;
950 return rc;
951}
952
953int chsc_sstpi(void *page, void *result, size_t size)
954{
955 struct {
956 struct chsc_header request;
957 unsigned int rsvd0[3];
958 struct chsc_header response;
959 char data[size];
960 } __attribute__ ((packed)) *rr;
961 int rc;
962
963 memset(page, 0, PAGE_SIZE);
964 rr = page;
965 rr->request.length = 0x0010;
966 rr->request.code = 0x0038;
967 rc = chsc(rr);
968 if (rc)
969 return -EIO;
970 memcpy(result, &rr->data, size);
971 return (rr->response.code == 0x0001) ? 0 : -EIO;
972}
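[editor's note: both STP helpers expect a zeroed 4K page below 2G as the chsc area. A hypothetical caller sketch; the op/ctrl values and the result structure are assumptions, not taken from this patch:]

	void *page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	struct stp_sstpi info;			/* result layout assumed */
	int rc;

	if (!page)
		return -ENOMEM;
	rc = chsc_sstpc(page, 0x0001, 0);	/* op/ctrl values assumed */
	if (rc == 0)
		rc = chsc_sstpi(page, &info, sizeof(info));
	free_page((unsigned long)page);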
973
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index d1f5db1e69b9..ba59bceace98 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -4,7 +4,8 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/device.h> 5#include <linux/device.h>
6#include <asm/chpid.h> 6#include <asm/chpid.h>
7#include "schid.h" 7#include <asm/chsc.h>
8#include <asm/schid.h>
8 9
9#define CHSC_SDA_OC_MSS 0x2 10#define CHSC_SDA_OC_MSS 0x2
10 11
@@ -36,14 +37,15 @@ struct channel_path_desc {
36 37
37struct channel_path; 38struct channel_path;
38 39
39extern void chsc_process_crw(void);
40
41struct css_general_char { 40struct css_general_char {
42 u64 : 41; 41 u64 : 12;
42 u32 dynio : 1; /* bit 12 */
43 u32 : 28;
43 u32 aif : 1; /* bit 41 */ 44 u32 aif : 1; /* bit 41 */
44 u32 : 3; 45 u32 : 3;
45 u32 mcss : 1; /* bit 45 */ 46 u32 mcss : 1; /* bit 45 */
46 u32 : 2; 47 u32 fcs : 1; /* bit 46 */
48 u32 : 1;
47 u32 ext_mb : 1; /* bit 48 */ 49 u32 ext_mb : 1; /* bit 48 */
48 u32 : 7; 50 u32 : 7;
49 u32 aif_tdd : 1; /* bit 56 */ 51 u32 aif_tdd : 1; /* bit 56 */
@@ -51,7 +53,11 @@ struct css_general_char {
51 u32 qebsm : 1; /* bit 58 */ 53 u32 qebsm : 1; /* bit 58 */
52 u32 : 8; 54 u32 : 8;
53 u32 aif_osa : 1; /* bit 67 */ 55 u32 aif_osa : 1; /* bit 67 */
54 u32 : 28; 56 u32 : 14;
57 u32 cib : 1; /* bit 82 */
58 u32 : 5;
59 u32 fcx : 1; /* bit 88 */
60 u32 : 7;
55}__attribute__((packed)); 61}__attribute__((packed));
56 62
57struct css_chsc_char { 63struct css_chsc_char {
@@ -78,7 +84,6 @@ struct chsc_ssd_info {
78extern int chsc_get_ssd_info(struct subchannel_id schid, 84extern int chsc_get_ssd_info(struct subchannel_id schid,
79 struct chsc_ssd_info *ssd); 85 struct chsc_ssd_info *ssd);
80extern int chsc_determine_css_characteristics(void); 86extern int chsc_determine_css_characteristics(void);
81extern int css_characteristics_avail;
82extern int chsc_alloc_sei_area(void); 87extern int chsc_alloc_sei_area(void);
83extern void chsc_free_sei_area(void); 88extern void chsc_free_sei_area(void);
84 89
@@ -87,10 +92,15 @@ struct channel_subsystem;
87extern int chsc_secm(struct channel_subsystem *, int); 92extern int chsc_secm(struct channel_subsystem *, int);
88 93
89int chsc_chp_vary(struct chp_id chpid, int on); 94int chsc_chp_vary(struct chp_id chpid, int on);
90int chsc_determine_channel_path_description(struct chp_id chpid, 95int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
91 struct channel_path_desc *desc); 96 int c, int m,
97 struct chsc_response_struct *resp);
98int chsc_determine_base_channel_path_desc(struct chp_id chpid,
99 struct channel_path_desc *desc);
92void chsc_chp_online(struct chp_id chpid); 100void chsc_chp_online(struct chp_id chpid);
93void chsc_chp_offline(struct chp_id chpid); 101void chsc_chp_offline(struct chp_id chpid);
94int chsc_get_channel_measurement_chars(struct channel_path *chp); 102int chsc_get_channel_measurement_chars(struct channel_path *chp);
95 103
104int chsc_error_from_response(int response);
105
96#endif 106#endif
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 000000000000..91ca87aa9f97
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,820 @@
1/*
2 * Driver for s390 chsc subchannels
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 *
7 */
8
9#include <linux/device.h>
10#include <linux/module.h>
11#include <linux/uaccess.h>
12#include <linux/miscdevice.h>
13
14#include <asm/cio.h>
15#include <asm/chsc.h>
16#include <asm/isc.h>
17
18#include "cio.h"
19#include "cio_debug.h"
20#include "css.h"
21#include "chsc_sch.h"
22#include "ioasm.h"
23
24static debug_info_t *chsc_debug_msg_id;
25static debug_info_t *chsc_debug_log_id;
26
27#define CHSC_MSG(imp, args...) do { \
28 debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \
29 } while (0)
30
31#define CHSC_LOG(imp, txt) do { \
32 debug_text_event(chsc_debug_log_id, imp , txt); \
33 } while (0)
34
35static void CHSC_LOG_HEX(int level, void *data, int length)
36{
37 while (length > 0) {
38 debug_event(chsc_debug_log_id, level, data, length);
39 length -= chsc_debug_log_id->buf_size;
40 data += chsc_debug_log_id->buf_size;
41 }
42}
43
44MODULE_AUTHOR("IBM Corporation");
45MODULE_DESCRIPTION("driver for s390 chsc subchannels");
46MODULE_LICENSE("GPL");
47
48static void chsc_subchannel_irq(struct subchannel *sch)
49{
50 struct chsc_private *private = sch->private;
51 struct chsc_request *request = private->request;
52 struct irb *irb = (struct irb *)__LC_IRB;
53
54 CHSC_LOG(4, "irb");
55 CHSC_LOG_HEX(4, irb, sizeof(*irb));
56 /* Copy irb to provided request and set done. */
57 if (!request) {
58 CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
59 sch->schid.ssid, sch->schid.sch_no);
60 return;
61 }
62 private->request = NULL;
63 memcpy(&request->irb, irb, sizeof(*irb));
64 stsch(sch->schid, &sch->schib);
65 complete(&request->completion);
66 put_device(&sch->dev);
67}
68
69static int chsc_subchannel_probe(struct subchannel *sch)
70{
71 struct chsc_private *private;
72 int ret;
73
74 CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
75 sch->schid.ssid, sch->schid.sch_no);
76 sch->isc = CHSC_SCH_ISC;
77 private = kzalloc(sizeof(*private), GFP_KERNEL);
78 if (!private)
79 return -ENOMEM;
80 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
81 if (ret) {
82 CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
83 sch->schid.ssid, sch->schid.sch_no, ret);
84 kfree(private);
85 } else {
86 sch->private = private;
87 if (sch->dev.uevent_suppress) {
88 sch->dev.uevent_suppress = 0;
89 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
90 }
91 }
92 return ret;
93}
94
95static int chsc_subchannel_remove(struct subchannel *sch)
96{
97 struct chsc_private *private;
98
99 cio_disable_subchannel(sch);
100 private = sch->private;
101 sch->private = NULL;
102 if (private->request) {
103 complete(&private->request->completion);
104 put_device(&sch->dev);
105 }
106 kfree(private);
107 return 0;
108}
109
110static void chsc_subchannel_shutdown(struct subchannel *sch)
111{
112 cio_disable_subchannel(sch);
113}
114
115static struct css_device_id chsc_subchannel_ids[] = {
116 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
117 { /* end of list */ },
118};
119MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
120
121static struct css_driver chsc_subchannel_driver = {
122 .owner = THIS_MODULE,
123 .subchannel_type = chsc_subchannel_ids,
124 .irq = chsc_subchannel_irq,
125 .probe = chsc_subchannel_probe,
126 .remove = chsc_subchannel_remove,
127 .shutdown = chsc_subchannel_shutdown,
128 .name = "chsc_subchannel",
129};
130
131static int __init chsc_init_dbfs(void)
132{
133 chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
134 16 * sizeof(long));
135 if (!chsc_debug_msg_id)
136 goto out;
137 debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
138 debug_set_level(chsc_debug_msg_id, 2);
139 chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
140 if (!chsc_debug_log_id)
141 goto out;
142 debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
143 debug_set_level(chsc_debug_log_id, 2);
144 return 0;
145out:
146 if (chsc_debug_msg_id)
147 debug_unregister(chsc_debug_msg_id);
148 return -ENOMEM;
149}
150
151static void chsc_remove_dbfs(void)
152{
153 debug_unregister(chsc_debug_log_id);
154 debug_unregister(chsc_debug_msg_id);
155}
156
157static int __init chsc_init_sch_driver(void)
158{
159 return css_driver_register(&chsc_subchannel_driver);
160}
161
162static void chsc_cleanup_sch_driver(void)
163{
164 css_driver_unregister(&chsc_subchannel_driver);
165}
166
167static DEFINE_SPINLOCK(chsc_lock);
168
169static int chsc_subchannel_match_next_free(struct device *dev, void *data)
170{
171 struct subchannel *sch = to_subchannel(dev);
172
173 return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
174}
175
176static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
177{
178 struct device *dev;
179
180 dev = driver_find_device(&chsc_subchannel_driver.drv,
181 sch ? &sch->dev : NULL, NULL,
182 chsc_subchannel_match_next_free);
183 return dev ? to_subchannel(dev) : NULL;
184}
185
186/**
187 * chsc_async() - try to start a chsc request asynchronously
188 * @chsc_area: request to be started
189 * @request: request structure to associate
190 *
191 * Tries to start a chsc request on one of the existing chsc subchannels.
192 * Returns:
193 * %0 if the request was performed synchronously
194 * %-EINPROGRESS if the request was successfully started
195 * %-EBUSY if all chsc subchannels are busy
196 * %-ENODEV if no chsc subchannels are available
197 * Context:
198 * interrupts disabled, chsc_lock held
199 */
200static int chsc_async(struct chsc_async_area *chsc_area,
201 struct chsc_request *request)
202{
203 int cc;
204 struct chsc_private *private;
205 struct subchannel *sch = NULL;
206 int ret = -ENODEV;
207 char dbf[10];
208
209 chsc_area->header.key = PAGE_DEFAULT_KEY;
210 while ((sch = chsc_get_next_subchannel(sch))) {
211 spin_lock(sch->lock);
212 private = sch->private;
213 if (private->request) {
214 spin_unlock(sch->lock);
215 ret = -EBUSY;
216 continue;
217 }
218 chsc_area->header.sid = sch->schid;
219 CHSC_LOG(2, "schid");
220 CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
221 cc = chsc(chsc_area);
222 sprintf(dbf, "cc:%d", cc);
223 CHSC_LOG(2, dbf);
224 switch (cc) {
225 case 0:
226 ret = 0;
227 break;
228 case 1:
229 sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
230 ret = -EINPROGRESS;
231 private->request = request;
232 break;
233 case 2:
234 ret = -EBUSY;
235 break;
236 default:
237 ret = -ENODEV;
238 }
239 spin_unlock(sch->lock);
240 CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
241 sch->schid.ssid, sch->schid.sch_no, cc);
242 if (ret == -EINPROGRESS)
243 return -EINPROGRESS;
244 put_device(&sch->dev);
245 if (ret == 0)
246 return 0;
247 }
248 return ret;
249}
250
251static void chsc_log_command(struct chsc_async_area *chsc_area)
252{
253 char dbf[10];
254
255 sprintf(dbf, "CHSC:%x", chsc_area->header.code);
256 CHSC_LOG(0, dbf);
257 CHSC_LOG_HEX(0, chsc_area, 32);
258}
259
260static int chsc_examine_irb(struct chsc_request *request)
261{
262 int backed_up;
263
264 if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
265 return -EIO;
266 backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
267 request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
268 if (scsw_cstat(&request->irb.scsw) == 0)
269 return 0;
270 if (!backed_up)
271 return 0;
272 if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
273 return -EIO;
274 if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
275 return -EPERM;
276 if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
277 return -EAGAIN;
278 if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
279 return -EAGAIN;
280 return -EIO;
281}
282
283static int chsc_ioctl_start(void __user *user_area)
284{
285 struct chsc_request *request;
286 struct chsc_async_area *chsc_area;
287 int ret;
288 char dbf[10];
289
290 if (!css_general_characteristics.dynio)
291 /* It makes no sense to try. */
292 return -EOPNOTSUPP;
293 chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
294 if (!chsc_area)
295 return -ENOMEM;
296 request = kzalloc(sizeof(*request), GFP_KERNEL);
297 if (!request) {
298 ret = -ENOMEM;
299 goto out_free;
300 }
301 init_completion(&request->completion);
302 if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
303 ret = -EFAULT;
304 goto out_free;
305 }
306 chsc_log_command(chsc_area);
307 spin_lock_irq(&chsc_lock);
308 ret = chsc_async(chsc_area, request);
309 spin_unlock_irq(&chsc_lock);
310 if (ret == -EINPROGRESS) {
311 wait_for_completion(&request->completion);
312 ret = chsc_examine_irb(request);
313 }
314 /* copy area back to user */
315 if (!ret)
316 if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
317 ret = -EFAULT;
318out_free:
319 sprintf(dbf, "ret:%d", ret);
320 CHSC_LOG(0, dbf);
321 kfree(request);
322 free_page((unsigned long)chsc_area);
323 return ret;
324}
325
326static int chsc_ioctl_info_channel_path(void __user *user_cd)
327{
328 struct chsc_chp_cd *cd;
329 int ret, ccode;
330 struct {
331 struct chsc_header request;
332 u32 : 2;
333 u32 m : 1;
334 u32 : 1;
335 u32 fmt1 : 4;
336 u32 cssid : 8;
337 u32 : 8;
338 u32 first_chpid : 8;
339 u32 : 24;
340 u32 last_chpid : 8;
341 u32 : 32;
342 struct chsc_header response;
343 u8 data[PAGE_SIZE - 20];
344 } __attribute__ ((packed)) *scpcd_area;
345
346 scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
347 if (!scpcd_area)
348 return -ENOMEM;
349 cd = kzalloc(sizeof(*cd), GFP_KERNEL);
350 if (!cd) {
351 ret = -ENOMEM;
352 goto out_free;
353 }
354 if (copy_from_user(cd, user_cd, sizeof(*cd))) {
355 ret = -EFAULT;
356 goto out_free;
357 }
358 scpcd_area->request.length = 0x0010;
359 scpcd_area->request.code = 0x0028;
360 scpcd_area->m = cd->m;
361 scpcd_area->fmt1 = cd->fmt;
362 scpcd_area->cssid = cd->chpid.cssid;
363 scpcd_area->first_chpid = cd->chpid.id;
364 scpcd_area->last_chpid = cd->chpid.id;
365
366 ccode = chsc(scpcd_area);
367 if (ccode != 0) {
368 ret = -EIO;
369 goto out_free;
370 }
371 if (scpcd_area->response.code != 0x0001) {
372 ret = -EIO;
373 CHSC_MSG(0, "scpcd: response code=%x\n",
374 scpcd_area->response.code);
375 goto out_free;
376 }
377 memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
378 if (copy_to_user(user_cd, cd, sizeof(*cd)))
379 ret = -EFAULT;
380 else
381 ret = 0;
382out_free:
383 kfree(cd);
384 free_page((unsigned long)scpcd_area);
385 return ret;
386}
387
388static int chsc_ioctl_info_cu(void __user *user_cd)
389{
390 struct chsc_cu_cd *cd;
391 int ret, ccode;
392 struct {
393 struct chsc_header request;
394 u32 : 2;
395 u32 m : 1;
396 u32 : 1;
397 u32 fmt1 : 4;
398 u32 cssid : 8;
399 u32 : 8;
400 u32 first_cun : 8;
401 u32 : 24;
402 u32 last_cun : 8;
403 u32 : 32;
404 struct chsc_header response;
405 u8 data[PAGE_SIZE - 20];
406 } __attribute__ ((packed)) *scucd_area;
407
408 scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
409 if (!scucd_area)
410 return -ENOMEM;
411 cd = kzalloc(sizeof(*cd), GFP_KERNEL);
412 if (!cd) {
413 ret = -ENOMEM;
414 goto out_free;
415 }
416 if (copy_from_user(cd, user_cd, sizeof(*cd))) {
417 ret = -EFAULT;
418 goto out_free;
419 }
420 scucd_area->request.length = 0x0010;
421 scucd_area->request.code = 0x0028;
422 scucd_area->m = cd->m;
423 scucd_area->fmt1 = cd->fmt;
424 scucd_area->cssid = cd->cssid;
425 scucd_area->first_cun = cd->cun;
426 scucd_area->last_cun = cd->cun;
427
428 ccode = chsc(scucd_area);
429 if (ccode != 0) {
430 ret = -EIO;
431 goto out_free;
432 }
433 if (scucd_area->response.code != 0x0001) {
434 ret = -EIO;
435 CHSC_MSG(0, "scucd: response code=%x\n",
436 scucd_area->response.code);
437 goto out_free;
438 }
439 memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
440 if (copy_to_user(user_cd, cd, sizeof(*cd)))
441 ret = -EFAULT;
442 else
443 ret = 0;
444out_free:
445 kfree(cd);
446 free_page((unsigned long)scucd_area);
447 return ret;
448}
449
450static int chsc_ioctl_info_sch_cu(void __user *user_cud)
451{
452 struct chsc_sch_cud *cud;
453 int ret, ccode;
454 struct {
455 struct chsc_header request;
456 u32 : 2;
457 u32 m : 1;
458 u32 : 5;
459 u32 fmt1 : 4;
460 u32 : 2;
461 u32 ssid : 2;
462 u32 first_sch : 16;
463 u32 : 8;
464 u32 cssid : 8;
465 u32 last_sch : 16;
466 u32 : 32;
467 struct chsc_header response;
468 u8 data[PAGE_SIZE - 20];
469 } __attribute__ ((packed)) *sscud_area;
470
471 sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
472 if (!sscud_area)
473 return -ENOMEM;
474 cud = kzalloc(sizeof(*cud), GFP_KERNEL);
475 if (!cud) {
476 ret = -ENOMEM;
477 goto out_free;
478 }
479 if (copy_from_user(cud, user_cud, sizeof(*cud))) {
480 ret = -EFAULT;
481 goto out_free;
482 }
483 sscud_area->request.length = 0x0010;
484 sscud_area->request.code = 0x0006;
485 sscud_area->m = cud->schid.m;
486 sscud_area->fmt1 = cud->fmt;
487 sscud_area->ssid = cud->schid.ssid;
488 sscud_area->first_sch = cud->schid.sch_no;
489 sscud_area->cssid = cud->schid.cssid;
490 sscud_area->last_sch = cud->schid.sch_no;
491
492 ccode = chsc(sscud_area);
493 if (ccode != 0) {
494 ret = -EIO;
495 goto out_free;
496 }
497 if (sscud_area->response.code != 0x0001) {
498 ret = -EIO;
499 CHSC_MSG(0, "sscud: response code=%x\n",
500 sscud_area->response.code);
501 goto out_free;
502 }
503 memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
504 if (copy_to_user(user_cud, cud, sizeof(*cud)))
505 ret = -EFAULT;
506 else
507 ret = 0;
508out_free:
509 kfree(cud);
510 free_page((unsigned long)sscud_area);
511 return ret;
512}
513
514static int chsc_ioctl_conf_info(void __user *user_ci)
515{
516 struct chsc_conf_info *ci;
517 int ret, ccode;
518 struct {
519 struct chsc_header request;
520 u32 : 2;
521 u32 m : 1;
522 u32 : 1;
523 u32 fmt1 : 4;
524 u32 cssid : 8;
525 u32 : 6;
526 u32 ssid : 2;
527 u32 : 8;
528 u64 : 64;
529 struct chsc_header response;
530 u8 data[PAGE_SIZE - 20];
531 } __attribute__ ((packed)) *sci_area;
532
533 sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
534 if (!sci_area)
535 return -ENOMEM;
536 ci = kzalloc(sizeof(*ci), GFP_KERNEL);
537 if (!ci) {
538 ret = -ENOMEM;
539 goto out_free;
540 }
541 if (copy_from_user(ci, user_ci, sizeof(*ci))) {
542 ret = -EFAULT;
543 goto out_free;
544 }
545 sci_area->request.length = 0x0010;
546 sci_area->request.code = 0x0012;
547 sci_area->m = ci->id.m;
548 sci_area->fmt1 = ci->fmt;
549 sci_area->cssid = ci->id.cssid;
550 sci_area->ssid = ci->id.ssid;
551
552 ccode = chsc(sci_area);
553 if (ccode != 0) {
554 ret = -EIO;
555 goto out_free;
556 }
557 if (sci_area->response.code != 0x0001) {
558 ret = -EIO;
559 CHSC_MSG(0, "sci: response code=%x\n",
560 sci_area->response.code);
561 goto out_free;
562 }
563 memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
564 if (copy_to_user(user_ci, ci, sizeof(*ci)))
565 ret = -EFAULT;
566 else
567 ret = 0;
568out_free:
569 kfree(ci);
570 free_page((unsigned long)sci_area);
571 return ret;
572}
573
574static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
575{
576 struct chsc_comp_list *ccl;
577 int ret, ccode;
578 struct {
579 struct chsc_header request;
580 u32 ctype : 8;
581 u32 : 4;
582 u32 fmt : 4;
583 u32 : 16;
584 u64 : 64;
585 u32 list_parm[2];
586 u64 : 64;
587 struct chsc_header response;
588 u8 data[PAGE_SIZE - 36];
589 } __attribute__ ((packed)) *sccl_area;
590 struct {
591 u32 m : 1;
592 u32 : 31;
593 u32 cssid : 8;
594 u32 : 16;
595 u32 chpid : 8;
596 } __attribute__ ((packed)) *chpid_parm;
597 struct {
598 u32 f_cssid : 8;
599 u32 l_cssid : 8;
600 u32 : 16;
601 u32 res;
602 } __attribute__ ((packed)) *cssids_parm;
603
604 sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
605 if (!sccl_area)
606 return -ENOMEM;
607 ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
608 if (!ccl) {
609 ret = -ENOMEM;
610 goto out_free;
611 }
612 if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
613 ret = -EFAULT;
614 goto out_free;
615 }
616 sccl_area->request.length = 0x0020;
617 sccl_area->request.code = 0x0030;
618 sccl_area->fmt = ccl->req.fmt;
619 sccl_area->ctype = ccl->req.ctype;
620 switch (sccl_area->ctype) {
621 case CCL_CU_ON_CHP:
622 case CCL_IOP_CHP:
623 chpid_parm = (void *)&sccl_area->list_parm;
624 chpid_parm->m = ccl->req.chpid.m;
625 chpid_parm->cssid = ccl->req.chpid.chp.cssid;
626 chpid_parm->chpid = ccl->req.chpid.chp.id;
627 break;
628 case CCL_CSS_IMG:
629 case CCL_CSS_IMG_CONF_CHAR:
630 cssids_parm = (void *)&sccl_area->list_parm;
631 cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
632 cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
633 break;
634 }
635 ccode = chsc(sccl_area);
636 if (ccode != 0) {
637 ret = -EIO;
638 goto out_free;
639 }
640 if (sccl_area->response.code != 0x0001) {
641 ret = -EIO;
642 CHSC_MSG(0, "sccl: response code=%x\n",
643 sccl_area->response.code);
644 goto out_free;
645 }
646 memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
647 if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
648 ret = -EFAULT;
649 else
650 ret = 0;
651out_free:
652 kfree(ccl);
653 free_page((unsigned long)sccl_area);
654 return ret;
655}
656
657static int chsc_ioctl_chpd(void __user *user_chpd)
658{
659 struct chsc_cpd_info *chpd;
660 int ret;
661
662 chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
663 if (!chpd)
664 return -ENOMEM;
665 if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
666 ret = -EFAULT;
667 goto out_free;
668 }
669 ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
670 chpd->rfmt, chpd->c, chpd->m,
671 &chpd->chpdb);
672 if (ret)
673 goto out_free;
674 if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
675 ret = -EFAULT;
676out_free:
677 kfree(chpd);
678 return ret;
679}
680
681static int chsc_ioctl_dcal(void __user *user_dcal)
682{
683 struct chsc_dcal *dcal;
684 int ret, ccode;
685 struct {
686 struct chsc_header request;
687 u32 atype : 8;
688 u32 : 4;
689 u32 fmt : 4;
690 u32 : 16;
691 u32 res0[2];
692 u32 list_parm[2];
693 u32 res1[2];
694 struct chsc_header response;
695 u8 data[PAGE_SIZE - 36];
696 } __attribute__ ((packed)) *sdcal_area;
697
698 sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
699 if (!sdcal_area)
700 return -ENOMEM;
701 dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
702 if (!dcal) {
703 ret = -ENOMEM;
704 goto out_free;
705 }
706 if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
707 ret = -EFAULT;
708 goto out_free;
709 }
710 sdcal_area->request.length = 0x0020;
711 sdcal_area->request.code = 0x0034;
712 sdcal_area->atype = dcal->req.atype;
713 sdcal_area->fmt = dcal->req.fmt;
714 memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
715 sizeof(sdcal_area->list_parm));
716
717 ccode = chsc(sdcal_area);
718 if (ccode != 0) {
719 ret = -EIO;
720 goto out_free;
721 }
722 if (sdcal_area->response.code != 0x0001) {
723 ret = -EIO;
724 CHSC_MSG(0, "sdcal: response code=%x\n",
725 sdcal_area->response.code);
726 goto out_free;
727 }
728 memcpy(&dcal->sdcal, &sdcal_area->response,
729 sdcal_area->response.length);
730 if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
731 ret = -EFAULT;
732 else
733 ret = 0;
734out_free:
735 kfree(dcal);
736 free_page((unsigned long)sdcal_area);
737 return ret;
738}
739
740static long chsc_ioctl(struct file *filp, unsigned int cmd,
741 unsigned long arg)
742{
743 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
744 switch (cmd) {
745 case CHSC_START:
746 return chsc_ioctl_start((void __user *)arg);
747 case CHSC_INFO_CHANNEL_PATH:
748 return chsc_ioctl_info_channel_path((void __user *)arg);
749 case CHSC_INFO_CU:
750 return chsc_ioctl_info_cu((void __user *)arg);
751 case CHSC_INFO_SCH_CU:
752 return chsc_ioctl_info_sch_cu((void __user *)arg);
753 case CHSC_INFO_CI:
754 return chsc_ioctl_conf_info((void __user *)arg);
755 case CHSC_INFO_CCL:
756 return chsc_ioctl_conf_comp_list((void __user *)arg);
757 case CHSC_INFO_CPD:
758 return chsc_ioctl_chpd((void __user *)arg);
759 case CHSC_INFO_DCAL:
760 return chsc_ioctl_dcal((void __user *)arg);
761 default: /* unknown ioctl number */
762 return -ENOIOCTLCMD;
763 }
764}
765
766static const struct file_operations chsc_fops = {
767 .owner = THIS_MODULE,
768 .unlocked_ioctl = chsc_ioctl,
769 .compat_ioctl = chsc_ioctl,
770};
771
772static struct miscdevice chsc_misc_device = {
773 .minor = MISC_DYNAMIC_MINOR,
774 .name = "chsc",
775 .fops = &chsc_fops,
776};
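[editor's note: from user space the new subchannels are driven through ioctls on this misc device. A hedged example, assuming udev creates /dev/chsc and that <asm/chsc.h> exports CHSC_START and the page-sized chsc_async_area layout; the request code below is a placeholder only:]

	struct chsc_async_area *area;
	int fd, rc;

	area = aligned_alloc(4096, 4096);	/* request must fill one page */
	memset(area, 0, 4096);
	area->header.code = 0x0010;		/* example command code only */
	fd = open("/dev/chsc", O_RDWR);
	rc = ioctl(fd, CHSC_START, area);
	if (rc == 0) {
		/* area now holds the command response block */
	}
	close(fd);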
777
778static int __init chsc_misc_init(void)
779{
780 return misc_register(&chsc_misc_device);
781}
782
783static void chsc_misc_cleanup(void)
784{
785 misc_deregister(&chsc_misc_device);
786}
787
788static int __init chsc_sch_init(void)
789{
790 int ret;
791
792 ret = chsc_init_dbfs();
793 if (ret)
794 return ret;
795 isc_register(CHSC_SCH_ISC);
796 ret = chsc_init_sch_driver();
797 if (ret)
798 goto out_dbf;
799 ret = chsc_misc_init();
800 if (ret)
801 goto out_driver;
802 return ret;
803out_driver:
804 chsc_cleanup_sch_driver();
805out_dbf:
806 isc_unregister(CHSC_SCH_ISC);
807 chsc_remove_dbfs();
808 return ret;
809}
810
811static void __exit chsc_sch_exit(void)
812{
813 chsc_misc_cleanup();
814 chsc_cleanup_sch_driver();
815 isc_unregister(CHSC_SCH_ISC);
816 chsc_remove_dbfs();
817}
818
819module_init(chsc_sch_init);
820module_exit(chsc_sch_exit);
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 000000000000..589ebfad6aad
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,13 @@
1#ifndef _CHSC_SCH_H
2#define _CHSC_SCH_H
3
4struct chsc_request {
5 struct completion completion;
6 struct irb irb;
7};
8
9struct chsc_private {
10 struct chsc_request *request;
11};
12
13#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b32d7eb3d81a..33bff8fec7d1 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -2,7 +2,7 @@
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2006 5 * Copyright IBM Corp. 1999,2008
6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -24,7 +24,9 @@
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h> 25#include <asm/chpid.h>
26#include <asm/airq.h> 26#include <asm/airq.h>
27#include <asm/isc.h>
27#include <asm/cpu.h> 28#include <asm/cpu.h>
29#include <asm/fcx.h>
28#include "cio.h" 30#include "cio.h"
29#include "css.h" 31#include "css.h"
30#include "chsc.h" 32#include "chsc.h"
@@ -72,7 +74,6 @@ out_unregister:
72 debug_unregister(cio_debug_trace_id); 74 debug_unregister(cio_debug_trace_id);
73 if (cio_debug_crw_id) 75 if (cio_debug_crw_id)
74 debug_unregister(cio_debug_crw_id); 76 debug_unregister(cio_debug_crw_id);
75 printk(KERN_WARNING"cio: could not initialize debugging\n");
76 return -1; 77 return -1;
77} 78}
78 79
@@ -128,7 +129,7 @@ cio_tpi(void)
128 local_bh_disable(); 129 local_bh_disable();
129 irq_enter (); 130 irq_enter ();
130 spin_lock(sch->lock); 131 spin_lock(sch->lock);
131 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); 132 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
132 if (sch->driver && sch->driver->irq) 133 if (sch->driver && sch->driver->irq)
133 sch->driver->irq(sch); 134 sch->driver->irq(sch);
134 spin_unlock(sch->lock); 135 spin_unlock(sch->lock);
@@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
167{ 168{
168 char dbf_txt[15]; 169 char dbf_txt[15];
169 int ccode; 170 int ccode;
170 struct orb *orb; 171 union orb *orb;
171 172
172 CIO_TRACE_EVENT(4, "stIO"); 173 CIO_TRACE_EVENT(4, "stIO");
173 CIO_TRACE_EVENT(4, sch->dev.bus_id); 174 CIO_TRACE_EVENT(4, sch->dev.bus_id);
174 175
175 orb = &to_io_private(sch)->orb; 176 orb = &to_io_private(sch)->orb;
176 /* sch is always under 2G. */ 177 /* sch is always under 2G. */
177 orb->intparm = (u32)(addr_t)sch; 178 orb->cmd.intparm = (u32)(addr_t)sch;
178 orb->fmt = 1; 179 orb->cmd.fmt = 1;
179 180
180 orb->pfch = sch->options.prefetch == 0; 181 orb->cmd.pfch = sch->options.prefetch == 0;
181 orb->spnd = sch->options.suspend; 182 orb->cmd.spnd = sch->options.suspend;
182 orb->ssic = sch->options.suspend && sch->options.inter; 183 orb->cmd.ssic = sch->options.suspend && sch->options.inter;
183 orb->lpm = (lpm != 0) ? lpm : sch->lpm; 184 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
184#ifdef CONFIG_64BIT 185#ifdef CONFIG_64BIT
185 /* 186 /*
186 * for 64 bit we always support 64 bit IDAWs with 4k page size only 187 * for 64 bit we always support 64 bit IDAWs with 4k page size only
187 */ 188 */
188 orb->c64 = 1; 189 orb->cmd.c64 = 1;
189 orb->i2k = 0; 190 orb->cmd.i2k = 0;
190#endif 191#endif
191 orb->key = key >> 4; 192 orb->cmd.key = key >> 4;
192 /* issue "Start Subchannel" */ 193 /* issue "Start Subchannel" */
193 orb->cpa = (__u32) __pa(cpa); 194 orb->cmd.cpa = (__u32) __pa(cpa);
194 ccode = ssch(sch->schid, orb); 195 ccode = ssch(sch->schid, orb);
195 196
196 /* process condition code */ 197 /* process condition code */
@@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
202 /* 203 /*
203 * initialize device status information 204 * initialize device status information
204 */ 205 */
205 sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; 206 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
206 return 0; 207 return 0;
207 case 1: /* status pending */ 208 case 1: /* status pending */
208 case 2: /* busy */ 209 case 2: /* busy */
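[editor's note: struct orb becomes union orb, with a command-mode view (cmd) and a transport-mode view (tm); command-mode users are rewritten to go through orb->cmd as above. Sketch of the assumed union shape, inferred from the accesses in this patch rather than from the full asm header:]

	union orb {
		struct cmd_orb cmd;	/* intparm, fmt, pfch, lpm, key, cpa, ... */
		struct tm_orb tm;	/* intparm, key, b, lpm, tcw, ... */
	};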
@@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch)
237 238
238 switch (ccode) { 239 switch (ccode) {
239 case 0: 240 case 0:
240 sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; 241 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
241 return 0; 242 return 0;
242 case 1: 243 case 1:
243 return -EBUSY; 244 return -EBUSY;
@@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch)
277 278
278 switch (ccode) { 279 switch (ccode) {
279 case 0: 280 case 0:
280 sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; 281 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
281 return 0; 282 return 0;
282 case 1: /* status pending */ 283 case 1: /* status pending */
283 case 2: /* busy */ 284 case 2: /* busy */
@@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch)
312 313
313 switch (ccode) { 314 switch (ccode) {
314 case 0: 315 case 0:
315 sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; 316 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
316 return 0; 317 return 0;
317 default: /* device not operational */ 318 default: /* device not operational */
318 return -ENODEV; 319 return -ENODEV;
@@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch)
387 return ret; 388 return ret;
388} 389}
389 390
390/* 391/**
391 * Enable subchannel. 392 * cio_enable_subchannel - enable a subchannel.
393 * @sch: subchannel to be enabled
394 * @intparm: interruption parameter to set
392 */ 395 */
393int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 396int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
394{ 397{
@@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
434 CIO_TRACE_EVENT (2, dbf_txt); 437 CIO_TRACE_EVENT (2, dbf_txt);
435 return ret; 438 return ret;
436} 439}
440EXPORT_SYMBOL_GPL(cio_enable_subchannel);
437 441
438/* 442/**
439 * Disable subchannel. 443 * cio_disable_subchannel - disable a subchannel.
444 * @sch: subchannel to disable
440 */ 445 */
441int 446int cio_disable_subchannel(struct subchannel *sch)
442cio_disable_subchannel (struct subchannel *sch)
443{ 447{
444 char dbf_txt[15]; 448 char dbf_txt[15];
445 int ccode; 449 int ccode;
@@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch)
455 if (ccode == 3) /* Not operational. */ 459 if (ccode == 3) /* Not operational. */
456 return -ENODEV; 460 return -ENODEV;
457 461
458 if (sch->schib.scsw.actl != 0) 462 if (scsw_actl(&sch->schib.scsw) != 0)
459 /* 463 /*
460 * the disable function must not be called while there are 464 * the disable function must not be called while there are
461 * requests pending for completion ! 465 * requests pending for completion !
@@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch)
484 CIO_TRACE_EVENT (2, dbf_txt); 488 CIO_TRACE_EVENT (2, dbf_txt);
485 return ret; 489 return ret;
486} 490}
491EXPORT_SYMBOL_GPL(cio_disable_subchannel);
487 492
488int cio_create_sch_lock(struct subchannel *sch) 493int cio_create_sch_lock(struct subchannel *sch)
489{ 494{
@@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch)
494 return 0; 499 return 0;
495} 500}
496 501
497/* 502static int cio_check_devno_blacklisted(struct subchannel *sch)
498 * cio_validate_subchannel() 503{
504 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
505 /*
506 * This device must not be known to Linux. So we simply
507 * say that there is no device and return ENODEV.
508 */
509 CIO_MSG_EVENT(6, "Blacklisted device detected "
510 "at devno %04X, subchannel set %x\n",
511 sch->schib.pmcw.dev, sch->schid.ssid);
512 return -ENODEV;
513 }
514 return 0;
515}
516
517static int cio_validate_io_subchannel(struct subchannel *sch)
518{
519 /* Initialization for io subchannels. */
520 if (!css_sch_is_valid(&sch->schib))
521 return -ENODEV;
522
523 /* Devno is valid. */
524 return cio_check_devno_blacklisted(sch);
525}
526
527static int cio_validate_msg_subchannel(struct subchannel *sch)
528{
529 /* Initialization for message subchannels. */
530 if (!css_sch_is_valid(&sch->schib))
531 return -ENODEV;
532
533 /* Devno is valid. */
534 return cio_check_devno_blacklisted(sch);
535}
536
537/**
538 * cio_validate_subchannel - basic validation of subchannel
539 * @sch: subchannel structure to be filled out
540 * @schid: subchannel id
499 * 541 *
500 * Find out subchannel type and initialize struct subchannel. 542 * Find out subchannel type and initialize struct subchannel.
501 * Return codes: 543 * Return codes:
502 * SUBCHANNEL_TYPE_IO for a normal io subchannel 544 * 0 on success
503 * SUBCHANNEL_TYPE_CHSC for a chsc subchannel
504 * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
505 * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
506 * -ENXIO for non-defined subchannels 545 * -ENXIO for non-defined subchannels
507 * -ENODEV for subchannels with invalid device number or blacklisted devices 546 * -ENODEV for invalid subchannels or blacklisted devices
547 * -EIO for subchannels in an invalid subchannel set
508 */ 548 */
509int 549int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
510cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
511{ 550{
512 char dbf_txt[15]; 551 char dbf_txt[15];
513 int ccode; 552 int ccode;
514 int err; 553 int err;
515 554
516 sprintf (dbf_txt, "valsch%x", schid.sch_no); 555 sprintf(dbf_txt, "valsch%x", schid.sch_no);
517 CIO_TRACE_EVENT (4, dbf_txt); 556 CIO_TRACE_EVENT(4, dbf_txt);
518 557
519 /* Nuke all fields. */ 558 /* Nuke all fields. */
520 memset(sch, 0, sizeof(struct subchannel)); 559 memset(sch, 0, sizeof(struct subchannel));
@@ -546,67 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
546 /* Copy subchannel type from path management control word. */ 585 /* Copy subchannel type from path management control word. */
547 sch->st = sch->schib.pmcw.st; 586 sch->st = sch->schib.pmcw.st;
548 587
549 /* 588 switch (sch->st) {
550 * ... just being curious we check for non I/O subchannels 589 case SUBCHANNEL_TYPE_IO:
551 */ 590 err = cio_validate_io_subchannel(sch);
552 if (sch->st != 0) { 591 break;
553 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports " 592 case SUBCHANNEL_TYPE_MSG:
554 "non-I/O subchannel type %04X\n", 593 err = cio_validate_msg_subchannel(sch);
555 sch->schid.ssid, sch->schid.sch_no, sch->st); 594 break;
556 /* We stop here for non-io subchannels. */ 595 default:
557 err = sch->st; 596 err = 0;
558 goto out;
559 }
560
561 /* Initialization for io subchannels. */
562 if (!css_sch_is_valid(&sch->schib)) {
563 err = -ENODEV;
564 goto out;
565 } 597 }
566 598 if (err)
567 /* Devno is valid. */
568 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
569 /*
570 * This device must not be known to Linux. So we simply
571 * say that there is no device and return ENODEV.
572 */
573 CIO_MSG_EVENT(6, "Blacklisted device detected "
574 "at devno %04X, subchannel set %x\n",
575 sch->schib.pmcw.dev, sch->schid.ssid);
576 err = -ENODEV;
577 goto out; 599 goto out;
578 }
579 if (cio_is_console(sch->schid)) {
580 sch->opm = 0xff;
581 sch->isc = 1;
582 } else {
583 sch->opm = chp_get_sch_opm(sch);
584 sch->isc = 3;
585 }
586 sch->lpm = sch->schib.pmcw.pam & sch->opm;
587
588 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
589 "- PIM = %02X, PAM = %02X, POM = %02X\n",
590 sch->schib.pmcw.dev, sch->schid.ssid,
591 sch->schid.sch_no, sch->schib.pmcw.pim,
592 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
593 600
594 /* 601 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
595 * We now have to initially ... 602 sch->schid.ssid, sch->schid.sch_no, sch->st);
596 * ... enable "concurrent sense"
597 * ... enable "multipath mode" if more than one
598 * CHPID is available. This is done regardless
599 * whether multiple paths are available for us.
600 */
601 sch->schib.pmcw.csense = 1; /* concurrent sense */
602 sch->schib.pmcw.ena = 0;
603 if ((sch->lpm & (sch->lpm - 1)) != 0)
604 sch->schib.pmcw.mp = 1; /* multipath mode */
605 /* clean up possible residual cmf stuff */
606 sch->schib.pmcw.mme = 0;
607 sch->schib.pmcw.mbfc = 0;
608 sch->schib.pmcw.mbi = 0;
609 sch->schib.mba = 0;
610 return 0; 603 return 0;
611out: 604out:
612 if (!cio_is_console(schid)) 605 if (!cio_is_console(schid))
@@ -647,7 +640,7 @@ do_IRQ (struct pt_regs *regs)
647 */ 640 */
648 if (tpi_info->adapter_IO == 1 && 641 if (tpi_info->adapter_IO == 1 &&
649 tpi_info->int_type == IO_INTERRUPT_TYPE) { 642 tpi_info->int_type == IO_INTERRUPT_TYPE) {
650 do_adapter_IO(); 643 do_adapter_IO(tpi_info->isc);
651 continue; 644 continue;
652 } 645 }
653 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 646 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -706,9 +699,9 @@ void wait_cons_dev(void)
706 if (!console_subchannel_in_use) 699 if (!console_subchannel_in_use)
707 return; 700 return;
708 701
709 /* disable all but isc 1 (console device) */ 702 /* disable all but the console isc */
710 __ctl_store (save_cr6, 6, 6); 703 __ctl_store (save_cr6, 6, 6);
711 cr6 = 0x40000000; 704 cr6 = 1UL << (31 - CONSOLE_ISC);
712 __ctl_load (cr6, 6, 6); 705 __ctl_load (cr6, 6, 6);
713 706
714 do { 707 do {
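[editor's note: worked out, the computed mask matches the old constant: with CONSOLE_ISC == 1, 1UL << (31 - 1) == 1UL << 30 == 0x40000000. CR6 keeps one I/O-interruption-subclass enable bit per ISC, and ISC n maps to the bit value 1UL << (31 - n).]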
@@ -716,7 +709,7 @@ void wait_cons_dev(void)
716 if (!cio_tpi()) 709 if (!cio_tpi())
717 cpu_relax(); 710 cpu_relax();
718 spin_lock(console_subchannel.lock); 711 spin_lock(console_subchannel.lock);
719 } while (console_subchannel.schib.scsw.actl != 0); 712 } while (console_subchannel.schib.scsw.cmd.actl != 0);
720 /* 713 /*
721 * restore previous isc value 714 * restore previous isc value
722 */ 715 */
@@ -761,7 +754,6 @@ cio_get_console_sch_no(void)
761 /* unlike in 2.4, we cannot autoprobe here, since 754 /* unlike in 2.4, we cannot autoprobe here, since
762 * the channel subsystem is not fully initialized. 755 * the channel subsystem is not fully initialized.
763 * With some luck, the HWC console can take over */ 756 * With some luck, the HWC console can take over */
764 printk(KERN_WARNING "cio: No ccw console found!\n");
765 return -1; 757 return -1;
766 } 758 }
767 return console_irq; 759 return console_irq;
@@ -778,6 +770,7 @@ cio_probe_console(void)
778 sch_no = cio_get_console_sch_no(); 770 sch_no = cio_get_console_sch_no();
779 if (sch_no == -1) { 771 if (sch_no == -1) {
780 console_subchannel_in_use = 0; 772 console_subchannel_in_use = 0;
773 printk(KERN_WARNING "cio: No ccw console found!\n");
781 return ERR_PTR(-ENODEV); 774 return ERR_PTR(-ENODEV);
782 } 775 }
783 memset(&console_subchannel, 0, sizeof(struct subchannel)); 776 memset(&console_subchannel, 0, sizeof(struct subchannel));
@@ -790,15 +783,15 @@ cio_probe_console(void)
790 } 783 }
791 784
792 /* 785 /*
793 * enable console I/O-interrupt subclass 1 786 * enable console I/O-interrupt subclass
794 */ 787 */
795 ctl_set_bit(6, 30); 788 isc_register(CONSOLE_ISC);
796 console_subchannel.isc = 1; 789 console_subchannel.schib.pmcw.isc = CONSOLE_ISC;
797 console_subchannel.schib.pmcw.isc = 1;
798 console_subchannel.schib.pmcw.intparm = 790 console_subchannel.schib.pmcw.intparm =
799 (u32)(addr_t)&console_subchannel; 791 (u32)(addr_t)&console_subchannel;
800 ret = cio_modify(&console_subchannel); 792 ret = cio_modify(&console_subchannel);
801 if (ret) { 793 if (ret) {
794 isc_unregister(CONSOLE_ISC);
802 console_subchannel_in_use = 0; 795 console_subchannel_in_use = 0;
803 return ERR_PTR(ret); 796 return ERR_PTR(ret);
804 } 797 }
@@ -810,7 +803,7 @@ cio_release_console(void)
810{ 803{
811 console_subchannel.schib.pmcw.intparm = 0; 804 console_subchannel.schib.pmcw.intparm = 0;
812 cio_modify(&console_subchannel); 805 cio_modify(&console_subchannel);
813 ctl_clear_bit(6, 24); 806 isc_unregister(CONSOLE_ISC);
814 console_subchannel_in_use = 0; 807 console_subchannel_in_use = 0;
815} 808}
816 809
@@ -864,7 +857,7 @@ static void udelay_reset(unsigned long usecs)
864} 857}
865 858
866static int 859static int
867__clear_subchannel_easy(struct subchannel_id schid) 860__clear_io_subchannel_easy(struct subchannel_id schid)
868{ 861{
869 int retry; 862 int retry;
870 863
@@ -883,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
883 return -EBUSY; 876 return -EBUSY;
884} 877}
885 878
879static void __clear_chsc_subchannel_easy(void)
880{
881 /* It seems we can only wait for a bit here :/ */
882 udelay_reset(100);
883}
884
886static int pgm_check_occured; 885static int pgm_check_occured;
887 886
888static void cio_reset_pgm_check_handler(void) 887static void cio_reset_pgm_check_handler(void)
@@ -921,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
921 case -ENODEV: 920 case -ENODEV:
922 break; 921 break;
923 default: /* -EBUSY */ 922 default: /* -EBUSY */
924 if (__clear_subchannel_easy(schid)) 923 switch (schib.pmcw.st) {
925 break; /* give up... */ 924 case SUBCHANNEL_TYPE_IO:
925 if (__clear_io_subchannel_easy(schid))
926 goto out; /* give up... */
927 break;
928 case SUBCHANNEL_TYPE_CHSC:
929 __clear_chsc_subchannel_easy();
930 break;
931 default:
932 /* No default clear strategy */
933 break;
934 }
926 stsch(schid, &schib); 935 stsch(schid, &schib);
927 __disable_subchannel_easy(schid, &schib); 936 __disable_subchannel_easy(schid, &schib);
928 } 937 }
938out:
929 return 0; 939 return 0;
930} 940}
931 941
@@ -1068,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1068 iplinfo->is_qdio = schib.pmcw.qf; 1078 iplinfo->is_qdio = schib.pmcw.qf;
1069 return 0; 1079 return 0;
1070} 1080}
1081
1082/**
1083 * cio_tm_start_key - perform start function
1084 * @sch: subchannel on which to perform the start function
1085 * @tcw: transport-command word to be started
1086 * @lpm: mask of paths to use
1087 * @key: storage key to use for storage access
1088 *
1089 * Start the tcw on the given subchannel. Return zero on success, non-zero
1090 * otherwise.
1091 */
1092int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
1093{
1094 int cc;
1095 union orb *orb = &to_io_private(sch)->orb;
1096
1097 memset(orb, 0, sizeof(union orb));
1098 orb->tm.intparm = (u32) (addr_t) sch;
1099 orb->tm.key = key >> 4;
1100 orb->tm.b = 1;
1101 orb->tm.lpm = lpm ? lpm : sch->lpm;
1102 orb->tm.tcw = (u32) (addr_t) tcw;
1103 cc = ssch(sch->schid, orb);
1104 switch (cc) {
1105 case 0:
1106 return 0;
1107 case 1:
1108 case 2:
1109 return -EBUSY;
1110 default:
1111 return cio_start_handle_notoper(sch, lpm);
1112 }
1113}
1114
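A hedged usage sketch for cio_tm_start_key: tcw_init() is the fcx helper added elsewhere in this series and PAGE_DEFAULT_KEY is the usual s390 storage key, both assumed here rather than shown by this hunk.

/* Hypothetical caller: start a transport-mode read on all available
 * paths (passing lpm == 0 makes cio_tm_start_key fall back to
 * sch->lpm).
 */
static int example_tm_start(struct subchannel *sch, struct tcw *tcw)
{
	tcw_init(tcw, 1, 0);	/* assumed fcx helper: read, no write */
	return cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
}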
1115/**
1116 * cio_tm_intrg - perform interrogate function
1117 * @sch: subchannel on which to perform the interrogate function
1118 *
1119 * If the specified subchannel is running in transport-mode, perform the
1120 * interrogate function. Return zero on success, non-zero otherwise.
1121 */
1122int cio_tm_intrg(struct subchannel *sch)
1123{
1124 int cc;
1125
1126 if (!to_io_private(sch)->orb.tm.b)
1127 return -EINVAL;
1128 cc = xsch(sch->schid);
1129 switch (cc) {
1130 case 0:
1131 case 2:
1132 return 0;
1133 case 1:
1134 return -EBUSY;
1135 default:
1136 return -ENODEV;
1137 }
1138}
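Note the condition-code mapping: cc 0 and cc 2 both count as success, only cc 1 becomes -EBUSY. A hypothetical timeout path might combine it with the existing clear helper:

/* Sketch: interrogate a stalled transport-mode request; if the
 * subchannel is not in transport mode (-EINVAL) or the interrogate
 * fails, fall back to clearing the subchannel.
 */
static void example_tm_timeout(struct subchannel *sch)
{
	if (cio_tm_intrg(sch))
		cio_clear(sch);
}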
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 6e933aebe013..3b236d20e835 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -3,9 +3,12 @@
3 3
4#include <linux/mutex.h> 4#include <linux/mutex.h>
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/mod_devicetable.h>
6#include <asm/chpid.h> 7#include <asm/chpid.h>
8#include <asm/cio.h>
9#include <asm/fcx.h>
10#include <asm/schid.h>
7#include "chsc.h" 11#include "chsc.h"
8#include "schid.h"
9 12
10/* 13/*
11 * path management control word 14 * path management control word
@@ -13,7 +16,7 @@
13struct pmcw { 16struct pmcw {
14 u32 intparm; /* interruption parameter */ 17 u32 intparm; /* interruption parameter */
15 u32 qf : 1; /* qdio facility */ 18 u32 qf : 1; /* qdio facility */
16 u32 res0 : 1; /* reserved zeros */ 19 u32 w : 1;
17 u32 isc : 3; /* interruption subclass */ 20 u32 isc : 3; /* interruption subclass */
18 u32 res5 : 3; /* reserved zeros */ 21 u32 res5 : 3; /* reserved zeros */
19 u32 ena : 1; /* enabled */ 22 u32 ena : 1; /* enabled */
@@ -47,7 +50,7 @@ struct pmcw {
47 */ 50 */
48struct schib { 51struct schib {
49 struct pmcw pmcw; /* path management control word */ 52 struct pmcw pmcw; /* path management control word */
50 struct scsw scsw; /* subchannel status word */ 53 union scsw scsw; /* subchannel status word */
51 __u64 mba; /* measurement block address */ 54 __u64 mba; /* measurement block address */
52 __u8 mda[4]; /* model dependent area */ 55 __u8 mda[4]; /* model dependent area */
53} __attribute__ ((packed,aligned(4))); 56} __attribute__ ((packed,aligned(4)));
@@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int);
99extern int cio_get_options (struct subchannel *); 102extern int cio_get_options (struct subchannel *);
100extern int cio_modify (struct subchannel *); 103extern int cio_modify (struct subchannel *);
101 104
105int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
106int cio_tm_intrg(struct subchannel *sch);
107
102int cio_create_sch_lock(struct subchannel *); 108int cio_create_sch_lock(struct subchannel *);
103void do_adapter_IO(void); 109void do_adapter_IO(u8 isc);
104void do_IRQ(struct pt_regs *); 110void do_IRQ(struct pt_regs *);
105 111
106/* Use with care. */ 112/* Use with care. */
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 2808b6833b9e..a90b28c0be57 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev)
341 if (stsch(sch->schid, &sch->schib)) 341 if (stsch(sch->schid, &sch->schib))
342 return -ENODEV; 342 return -ENODEV;
343 343
344 if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { 344 if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
345 /* Don't copy if a start function is in progress. */ 345 /* Don't copy if a start function is in progress. */
346 if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) && 346 if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
347 (sch->schib.scsw.actl & 347 (scsw_actl(&sch->schib.scsw) &
348 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && 348 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
349 (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))) 349 (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
350 return -EBUSY; 350 return -EBUSY;
351 } 351 }
352 cmb_data = cdev->private->cmb; 352 cmb_data = cdev->private->cmb;
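The switch from sch->schib.scsw.fctl to scsw_fctl() and friends matches the schib change later in this patch, where scsw becomes a union of command-mode and transport-mode layouts and the accessors select the active member. A rough sketch of the pattern; the real helpers live in the new scsw.c, and scsw_is_tm() is assumed to report whether the transport-mode layout is in use:

static inline u32 example_scsw_fctl(union scsw *scsw)
{
	if (scsw_is_tm(scsw))
		return scsw->tm.fctl;	/* transport mode */
	return scsw->cmd.fctl;		/* command mode */
}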
@@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev)
612 free_pages((unsigned long)mem, get_order(size)); 612 free_pages((unsigned long)mem, get_order(size));
613 } else if (!mem) { 613 } else if (!mem) {
614 /* no luck */ 614 /* no luck */
615 printk(KERN_WARNING "cio: failed to allocate area "
616 "for measuring %d subchannels\n",
617 cmb_area.num_channels);
618 ret = -ENOMEM; 615 ret = -ENOMEM;
619 goto out; 616 goto out;
620 } else { 617 } else {
@@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev,
1230 switch (val) { 1227 switch (val) {
1231 case 0: 1228 case 0:
1232 ret = disable_cmf(cdev); 1229 ret = disable_cmf(cdev);
1233 if (ret)
1234 dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
1235 break; 1230 break;
1236 case 1: 1231 case 1:
1237 ret = enable_cmf(cdev); 1232 ret = enable_cmf(cdev);
1238 if (ret && ret != -EBUSY)
1239 dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
1240 break; 1233 break;
1241 } 1234 }
1242 1235
@@ -1344,8 +1337,7 @@ static int __init init_cmf(void)
1344 * to basic mode. 1337 * to basic mode.
1345 */ 1338 */
1346 if (format == CMF_AUTODETECT) { 1339 if (format == CMF_AUTODETECT) {
1347 if (!css_characteristics_avail || 1340 if (!css_general_characteristics.ext_mb) {
1348 !css_general_characteristics.ext_mb) {
1349 format = CMF_BASIC; 1341 format = CMF_BASIC;
1350 } else { 1342 } else {
1351 format = CMF_EXTENDED; 1343 format = CMF_EXTENDED;
@@ -1365,8 +1357,6 @@ static int __init init_cmf(void)
1365 cmbops = &cmbops_extended; 1357 cmbops = &cmbops_extended;
1366 break; 1358 break;
1367 default: 1359 default:
1368 printk(KERN_ERR "cio: Invalid format %d for channel "
1369 "measurement facility\n", format);
1370 return 1; 1360 return 1;
1371 } 1361 }
1372 1362
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a76956512b2d..46c021d880dc 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/css.c 2 * drivers/s390/cio/css.c
3 * driver for channel subsystem 3 * driver for channel subsystem
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 */ 8 */
@@ -14,7 +13,9 @@
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/list.h> 14#include <linux/list.h>
16#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <asm/isc.h>
17 17
18#include "../s390mach.h"
18#include "css.h" 19#include "css.h"
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -30,8 +31,6 @@ static int max_ssid = 0;
30 31
31struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; 32struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
32 33
33int css_characteristics_avail = 0;
34
35int 34int
36for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 35for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
37{ 36{
@@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid)
121 kfree(sch); 120 kfree(sch);
122 return ERR_PTR(ret); 121 return ERR_PTR(ret);
123 } 122 }
124
125 if (sch->st != SUBCHANNEL_TYPE_IO) {
126 /* For now we ignore all non-io subchannels. */
127 kfree(sch);
128 return ERR_PTR(-EINVAL);
129 }
130
131 /*
132 * Set intparm to subchannel address.
133 * This is fine even on 64bit since the subchannel is always located
134 * under 2G.
135 */
136 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
137 ret = cio_modify(sch);
138 if (ret) {
139 kfree(sch->lock);
140 kfree(sch);
141 return ERR_PTR(ret);
142 }
143 return sch; 123 return sch;
144} 124}
145 125
@@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch)
177 return ret; 157 return ret;
178} 158}
179 159
160/**
161 * css_sch_device_unregister - unregister a subchannel
162 * @sch: subchannel to be unregistered
163 */
180void css_sch_device_unregister(struct subchannel *sch) 164void css_sch_device_unregister(struct subchannel *sch)
181{ 165{
182 mutex_lock(&sch->reg_mutex); 166 mutex_lock(&sch->reg_mutex);
183 device_unregister(&sch->dev); 167 if (device_is_registered(&sch->dev))
168 device_unregister(&sch->dev);
184 mutex_unlock(&sch->reg_mutex); 169 mutex_unlock(&sch->reg_mutex);
185} 170}
171EXPORT_SYMBOL_GPL(css_sch_device_unregister);
186 172
187static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 173static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
188{ 174{
@@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch)
229 } 215 }
230} 216}
231 217
218static ssize_t type_show(struct device *dev, struct device_attribute *attr,
219 char *buf)
220{
221 struct subchannel *sch = to_subchannel(dev);
222
223 return sprintf(buf, "%01x\n", sch->st);
224}
225
226static DEVICE_ATTR(type, 0444, type_show, NULL);
227
228static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
229 char *buf)
230{
231 struct subchannel *sch = to_subchannel(dev);
232
233 return sprintf(buf, "css:t%01X\n", sch->st);
234}
235
236static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
237
238static struct attribute *subch_attrs[] = {
239 &dev_attr_type.attr,
240 &dev_attr_modalias.attr,
241 NULL,
242};
243
244static struct attribute_group subch_attr_group = {
245 .attrs = subch_attrs,
246};
247
248static struct attribute_group *default_subch_attr_groups[] = {
249 &subch_attr_group,
250 NULL,
251};
252
232static int css_register_subchannel(struct subchannel *sch) 253static int css_register_subchannel(struct subchannel *sch)
233{ 254{
234 int ret; 255 int ret;
@@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch)
237 sch->dev.parent = &channel_subsystems[0]->device; 258 sch->dev.parent = &channel_subsystems[0]->device;
238 sch->dev.bus = &css_bus_type; 259 sch->dev.bus = &css_bus_type;
239 sch->dev.release = &css_subchannel_release; 260 sch->dev.release = &css_subchannel_release;
240 sch->dev.groups = subch_attr_groups; 261 sch->dev.groups = default_subch_attr_groups;
241 /* 262 /*
242 * We don't want to generate uevents for I/O subchannels that don't 263 * We don't want to generate uevents for I/O subchannels that don't
243 * have a working ccw device behind them since they will be 264 * have a working ccw device behind them since they will be
244 * unregistered before they can be used anyway, so we delay the add 265 * unregistered before they can be used anyway, so we delay the add
245 * uevent until after device recognition was successful. 266 * uevent until after device recognition was successful.
267 * Note that we suppress the uevent for all subchannel types;
268 * the subchannel driver can decide itself when it wants to inform
269 * userspace of its existence.
246 */ 270 */
247 if (!cio_is_console(sch->schid)) 271 sch->dev.uevent_suppress = 1;
248 /* Console is special, no need to suppress. */
249 sch->dev.uevent_suppress = 1;
250 css_update_ssd_info(sch); 272 css_update_ssd_info(sch);
251 /* make it known to the system */ 273 /* make it known to the system */
252 ret = css_sch_device_register(sch); 274 ret = css_sch_device_register(sch);
@@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch)
255 sch->schid.ssid, sch->schid.sch_no, ret); 277 sch->schid.ssid, sch->schid.sch_no, ret);
256 return ret; 278 return ret;
257 } 279 }
280 if (!sch->driver) {
281 /*
282 * No driver matched. Generate the uevent now so that
283 * a fitting driver module may be loaded based on the
284 * modalias.
285 */
286 sch->dev.uevent_suppress = 0;
287 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
288 }
258 return ret; 289 return ret;
259} 290}
260 291
261static int css_probe_device(struct subchannel_id schid) 292int css_probe_device(struct subchannel_id schid)
262{ 293{
263 int ret; 294 int ret;
264 struct subchannel *sch; 295 struct subchannel *sch;
@@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib)
301{ 332{
302 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) 333 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
303 return 0; 334 return 0;
335 if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
336 return 0;
304 return 1; 337 return 1;
305} 338}
306EXPORT_SYMBOL_GPL(css_sch_is_valid); 339EXPORT_SYMBOL_GPL(css_sch_is_valid);
307 340
308static int css_get_subchannel_status(struct subchannel *sch)
309{
310 struct schib schib;
311
312 if (stsch(sch->schid, &schib))
313 return CIO_GONE;
314 if (!css_sch_is_valid(&schib))
315 return CIO_GONE;
316 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
317 return CIO_REVALIDATE;
318 if (!sch->lpm)
319 return CIO_NO_PATH;
320 return CIO_OPER;
321}
322
323static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
324{
325 int event, ret, disc;
326 unsigned long flags;
327 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
328
329 spin_lock_irqsave(sch->lock, flags);
330 disc = device_is_disconnected(sch);
331 if (disc && slow) {
332 /* Disconnected devices are evaluated directly only.*/
333 spin_unlock_irqrestore(sch->lock, flags);
334 return 0;
335 }
336 /* No interrupt after machine check - kill pending timers. */
337 device_kill_pending_timer(sch);
338 if (!disc && !slow) {
339 /* Non-disconnected devices are evaluated on the slow path. */
340 spin_unlock_irqrestore(sch->lock, flags);
341 return -EAGAIN;
342 }
343 event = css_get_subchannel_status(sch);
344 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
345 sch->schid.ssid, sch->schid.sch_no, event,
346 disc ? "disconnected" : "normal",
347 slow ? "slow" : "fast");
348 /* Analyze subchannel status. */
349 action = NONE;
350 switch (event) {
351 case CIO_NO_PATH:
352 if (disc) {
353 /* Check if paths have become available. */
354 action = REPROBE;
355 break;
356 }
357 /* fall through */
358 case CIO_GONE:
359 /* Prevent unwanted effects when opening lock. */
360 cio_disable_subchannel(sch);
361 device_set_disconnected(sch);
362 /* Ask driver what to do with device. */
363 action = UNREGISTER;
364 if (sch->driver && sch->driver->notify) {
365 spin_unlock_irqrestore(sch->lock, flags);
366 ret = sch->driver->notify(sch, event);
367 spin_lock_irqsave(sch->lock, flags);
368 if (ret)
369 action = NONE;
370 }
371 break;
372 case CIO_REVALIDATE:
373 /* Device will be removed, so no notify necessary. */
374 if (disc)
375 /* Reprobe because immediate unregister might block. */
376 action = REPROBE;
377 else
378 action = UNREGISTER_PROBE;
379 break;
380 case CIO_OPER:
381 if (disc)
382 /* Get device operational again. */
383 action = REPROBE;
384 break;
385 }
386 /* Perform action. */
387 ret = 0;
388 switch (action) {
389 case UNREGISTER:
390 case UNREGISTER_PROBE:
391 /* Unregister device (will use subchannel lock). */
392 spin_unlock_irqrestore(sch->lock, flags);
393 css_sch_device_unregister(sch);
394 spin_lock_irqsave(sch->lock, flags);
395
396 /* Reset intparm to zeroes. */
397 sch->schib.pmcw.intparm = 0;
398 cio_modify(sch);
399 break;
400 case REPROBE:
401 device_trigger_reprobe(sch);
402 break;
403 default:
404 break;
405 }
406 spin_unlock_irqrestore(sch->lock, flags);
407 /* Probe if necessary. */
408 if (action == UNREGISTER_PROBE)
409 ret = css_probe_device(sch->schid);
410
411 return ret;
412}
413
414static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) 341static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
415{ 342{
416 struct schib schib; 343 struct schib schib;
@@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
429 return css_probe_device(schid); 356 return css_probe_device(schid);
430} 357}
431 358
359static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
360{
361 int ret = 0;
362
363 if (sch->driver) {
364 if (sch->driver->sch_event)
365 ret = sch->driver->sch_event(sch, slow);
366 else
367 dev_dbg(&sch->dev,
368 "Got subchannel machine check but "
369 "no sch_event handler provided.\n");
370 }
371 return ret;
372}
373
432static void css_evaluate_subchannel(struct subchannel_id schid, int slow) 374static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
433{ 375{
434 struct subchannel *sch; 376 struct subchannel *sch;
@@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
596/* 538/*
597 * Called from the machine check handler for subchannel report words. 539 * Called from the machine check handler for subchannel report words.
598 */ 540 */
599void css_process_crw(int rsid1, int rsid2) 541static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
600{ 542{
601 struct subchannel_id mchk_schid; 543 struct subchannel_id mchk_schid;
602 544
603 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", 545 if (overflow) {
604 rsid1, rsid2); 546 css_schedule_eval_all();
547 return;
548 }
549 CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
550 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
551 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
552 crw0->erc, crw0->rsid);
553 if (crw1)
554 CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
555 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
556 crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
557 crw1->anc, crw1->erc, crw1->rsid);
605 init_subchannel_id(&mchk_schid); 558 init_subchannel_id(&mchk_schid);
606 mchk_schid.sch_no = rsid1; 559 mchk_schid.sch_no = crw0->rsid;
607 if (rsid2 != 0) 560 if (crw1)
608 mchk_schid.ssid = (rsid2 >> 8) & 3; 561 mchk_schid.ssid = (crw1->rsid >> 8) & 3;
609 562
610 /* 563 /*
611 * Since we are always presented with IPI in the CRW, we have to 564 * Since we are always presented with IPI in the CRW, we have to
612 * use stsch() to find out if the subchannel in question has come 565 * use stsch() to find out if the subchannel in question has come
613 * or gone. 566 * or gone.
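css_process_crw now receives decoded channel report words instead of two raw rsid values. The fields logged above suggest a bit layout roughly like the following sketch; see s390mach.h for the authoritative definition, as the widths here are an assumption:

struct crw {
	__u32 res1 :  1;	/* reserved zero */
	__u32 slct :  1;	/* solicited */
	__u32 oflw :  1;	/* overflow */
	__u32 chn  :  1;	/* chained */
	__u32 rsc  :  4;	/* reporting source code */
	__u32 anc  :  1;	/* ancillary report */
	__u32 res2 :  1;	/* reserved zero */
	__u32 erc  :  6;	/* error recovery code */
	__u32 rsid : 16;	/* reporting source ID */
} __attribute__ ((packed));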
@@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
658static void __init 611static void __init
659css_generate_pgid(struct channel_subsystem *css, u32 tod_high) 612css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
660{ 613{
661 if (css_characteristics_avail && css_general_characteristics.mcss) { 614 if (css_general_characteristics.mcss) {
662 css->global_pgid.pgid_high.ext_cssid.version = 0x80; 615 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
663 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 616 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
664 } else { 617 } else {
@@ -795,8 +748,6 @@ init_channel_subsystem (void)
795 ret = chsc_determine_css_characteristics(); 748 ret = chsc_determine_css_characteristics();
796 if (ret == -ENOMEM) 749 if (ret == -ENOMEM)
797 goto out; /* No need to continue. */ 750 goto out; /* No need to continue. */
798 if (ret == 0)
799 css_characteristics_avail = 1;
800 751
801 ret = chsc_alloc_sei_area(); 752 ret = chsc_alloc_sei_area();
802 if (ret) 753 if (ret)
@@ -806,6 +757,10 @@ init_channel_subsystem (void)
806 if (ret) 757 if (ret)
807 goto out; 758 goto out;
808 759
760 ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
761 if (ret)
762 goto out;
763
809 if ((ret = bus_register(&css_bus_type))) 764 if ((ret = bus_register(&css_bus_type)))
810 goto out; 765 goto out;
811 766
@@ -836,8 +791,7 @@ init_channel_subsystem (void)
836 ret = device_register(&css->device); 791 ret = device_register(&css->device);
837 if (ret) 792 if (ret)
838 goto out_free_all; 793 goto out_free_all;
839 if (css_characteristics_avail && 794 if (css_chsc_characteristics.secm) {
840 css_chsc_characteristics.secm) {
841 ret = device_create_file(&css->device, 795 ret = device_create_file(&css->device,
842 &dev_attr_cm_enable); 796 &dev_attr_cm_enable);
843 if (ret) 797 if (ret)
@@ -852,7 +806,8 @@ init_channel_subsystem (void)
852 goto out_pseudo; 806 goto out_pseudo;
853 css_init_done = 1; 807 css_init_done = 1;
854 808
855 ctl_set_bit(6, 28); 809 /* Enable default isc for I/O subchannels. */
810 isc_register(IO_SCH_ISC);
856 811
857 for_each_subchannel(__init_channel_subsystem, NULL); 812 for_each_subchannel(__init_channel_subsystem, NULL);
858 return 0; 813 return 0;
@@ -875,7 +830,7 @@ out_unregister:
875 i--; 830 i--;
876 css = channel_subsystems[i]; 831 css = channel_subsystems[i];
877 device_unregister(&css->pseudo_subchannel->dev); 832 device_unregister(&css->pseudo_subchannel->dev);
878 if (css_characteristics_avail && css_chsc_characteristics.secm) 833 if (css_chsc_characteristics.secm)
879 device_remove_file(&css->device, 834 device_remove_file(&css->device,
880 &dev_attr_cm_enable); 835 &dev_attr_cm_enable);
881 device_unregister(&css->device); 836 device_unregister(&css->device);
@@ -883,6 +838,7 @@ out_unregister:
883out_bus: 838out_bus:
884 bus_unregister(&css_bus_type); 839 bus_unregister(&css_bus_type);
885out: 840out:
841 s390_unregister_crw_handler(CRW_RSC_CSS);
886 chsc_free_sei_area(); 842 chsc_free_sei_area();
887 kfree(slow_subchannel_set); 843 kfree(slow_subchannel_set);
888 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", 844 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
@@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch)
895 return sch == to_css(sch->dev.parent)->pseudo_subchannel; 851 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
896} 852}
897 853
898/* 854static int css_bus_match(struct device *dev, struct device_driver *drv)
899 * find a driver for a subchannel. They identify by the subchannel
900 * type with the exception that the console subchannel driver has its own
901 * subchannel type although the device is an i/o subchannel
902 */
903static int
904css_bus_match (struct device *dev, struct device_driver *drv)
905{ 855{
906 struct subchannel *sch = to_subchannel(dev); 856 struct subchannel *sch = to_subchannel(dev);
907 struct css_driver *driver = to_cssdriver(drv); 857 struct css_driver *driver = to_cssdriver(drv);
858 struct css_device_id *id;
908 859
909 if (sch->st == driver->subchannel_type) 860 for (id = driver->subchannel_type; id->match_flags; id++) {
910 return 1; 861 if (sch->st == id->type)
862 return 1;
863 }
911 864
912 return 0; 865 return 0;
913} 866}
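A driver now advertises the subchannel types it handles through a null-terminated css_device_id table, as the io_subchannel driver does later in this patch. A hedged sketch for a hypothetical driver binding to CHSC subchannels:

static struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};

static struct css_driver example_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = example_subchannel_ids,
	.name = "example_subchannel",
};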
@@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev)
945 sch->driver->shutdown(sch); 898 sch->driver->shutdown(sch);
946} 899}
947 900
901static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
902{
903 struct subchannel *sch = to_subchannel(dev);
904 int ret;
905
906 ret = add_uevent_var(env, "ST=%01X", sch->st);
907 if (ret)
908 return ret;
909 ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
910 return ret;
911}
912
948struct bus_type css_bus_type = { 913struct bus_type css_bus_type = {
949 .name = "css", 914 .name = "css",
950 .match = css_bus_match, 915 .match = css_bus_match,
951 .probe = css_probe, 916 .probe = css_probe,
952 .remove = css_remove, 917 .remove = css_remove,
953 .shutdown = css_shutdown, 918 .shutdown = css_shutdown,
919 .uevent = css_uevent,
954}; 920};
955 921
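The MODALIAS exported by css_uevent has the form css:tN, with N the subchannel type, so user space can autoload a matching module. Assuming the module build understands css id tables, the table sketched above would yield the same alias a driver could also declare by hand:

MODULE_ALIAS("css:t1");	/* hypothetical; assumes SUBCHANNEL_TYPE_CHSC == 1 */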
956/** 922/**
@@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem);
985 951
986MODULE_LICENSE("GPL"); 952MODULE_LICENSE("GPL");
987EXPORT_SYMBOL(css_bus_type); 953EXPORT_SYMBOL(css_bus_type);
988EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index e1913518f354..57ebf120f825 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -9,8 +9,7 @@
9 9
10#include <asm/cio.h> 10#include <asm/cio.h>
11#include <asm/chpid.h> 11#include <asm/chpid.h>
12 12#include <asm/schid.h>
13#include "schid.h"
14 13
15/* 14/*
16 * path grouping stuff 15 * path grouping stuff
@@ -58,20 +57,28 @@ struct pgid {
58 __u32 tod_high; /* high word TOD clock */ 57 __u32 tod_high; /* high word TOD clock */
59} __attribute__ ((packed)); 58} __attribute__ ((packed));
60 59
61/*
62 * A css driver handles all subchannels of one type.
63 * Currently, we only care about I/O subchannels (type 0), these
64 * have a ccw_device connected to them.
65 */
66struct subchannel; 60struct subchannel;
61struct chp_link;
62/**
63 * struct css_driver - device driver for subchannels
64 * @owner: owning module
65 * @subchannel_type: null-terminated table of subchannel types supported by this driver
66 * @drv: embedded device driver structure
67 * @irq: called on interrupts
68 * @chp_event: called for events affecting a channel path
69 * @sch_event: called for events affecting the subchannel
70 * @probe: function called on probe
71 * @remove: function called on remove
72 * @shutdown: called at device shutdown
73 * @name: name of the device driver
74 */
67struct css_driver { 75struct css_driver {
68 struct module *owner; 76 struct module *owner;
69 unsigned int subchannel_type; 77 struct css_device_id *subchannel_type;
70 struct device_driver drv; 78 struct device_driver drv;
71 void (*irq)(struct subchannel *); 79 void (*irq)(struct subchannel *);
72 int (*notify)(struct subchannel *, int); 80 int (*chp_event)(struct subchannel *, struct chp_link *, int);
73 void (*verify)(struct subchannel *); 81 int (*sch_event)(struct subchannel *, int);
74 void (*termination)(struct subchannel *);
75 int (*probe)(struct subchannel *); 82 int (*probe)(struct subchannel *);
76 int (*remove)(struct subchannel *); 83 int (*remove)(struct subchannel *);
77 void (*shutdown)(struct subchannel *); 84 void (*shutdown)(struct subchannel *);
@@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *);
89extern void css_driver_unregister(struct css_driver *); 96extern void css_driver_unregister(struct css_driver *);
90 97
91extern void css_sch_device_unregister(struct subchannel *); 98extern void css_sch_device_unregister(struct subchannel *);
92extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 99extern int css_probe_device(struct subchannel_id);
100extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
93extern int css_init_done; 101extern int css_init_done;
94int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), 102int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
95 int (*fn_unknown)(struct subchannel_id, 103 int (*fn_unknown)(struct subchannel_id,
96 void *), void *data); 104 void *), void *data);
97extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 105extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
98extern void css_process_crw(int, int);
99extern void css_reiterate_subchannels(void); 106extern void css_reiterate_subchannels(void);
100void css_update_ssd_info(struct subchannel *sch); 107void css_update_ssd_info(struct subchannel *sch);
101 108
@@ -121,20 +128,6 @@ struct channel_subsystem {
121extern struct bus_type css_bus_type; 128extern struct bus_type css_bus_type;
122extern struct channel_subsystem *channel_subsystems[]; 129extern struct channel_subsystem *channel_subsystems[];
123 130
124/* Some helper functions for disconnected state. */
125int device_is_disconnected(struct subchannel *);
126void device_set_disconnected(struct subchannel *);
127void device_trigger_reprobe(struct subchannel *);
128
129/* Helper functions for vary on/off. */
130int device_is_online(struct subchannel *);
131void device_kill_io(struct subchannel *);
132void device_set_intretry(struct subchannel *sch);
133int device_trigger_verify(struct subchannel *sch);
134
135/* Machine check helper function. */
136void device_kill_pending_timer(struct subchannel *);
137
138/* Helper functions to build lists for the slow path. */ 131/* Helper functions to build lists for the slow path. */
139void css_schedule_eval(struct subchannel_id schid); 132void css_schedule_eval(struct subchannel_id schid);
140void css_schedule_eval_all(void); 133void css_schedule_eval_all(void);
@@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *);
145 138
146extern struct workqueue_struct *slow_path_wq; 139extern struct workqueue_struct *slow_path_wq;
147void css_wait_for_slow_path(void); 140void css_wait_for_slow_path(void);
148
149extern struct attribute_group *subch_attr_groups[];
150#endif 141#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e22813db74a2..e818d0c54c09 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/device.c 2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices 3 * bus driver for ccw devices
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
@@ -23,7 +22,9 @@
23#include <asm/cio.h> 22#include <asm/cio.h>
24#include <asm/param.h> /* HZ */ 23#include <asm/param.h> /* HZ */
25#include <asm/cmb.h> 24#include <asm/cmb.h>
25#include <asm/isc.h>
26 26
27#include "chp.h"
27#include "cio.h" 28#include "cio.h"
28#include "cio_debug.h" 29#include "cio_debug.h"
29#include "css.h" 30#include "css.h"
@@ -125,19 +126,24 @@ struct bus_type ccw_bus_type;
125static void io_subchannel_irq(struct subchannel *); 126static void io_subchannel_irq(struct subchannel *);
126static int io_subchannel_probe(struct subchannel *); 127static int io_subchannel_probe(struct subchannel *);
127static int io_subchannel_remove(struct subchannel *); 128static int io_subchannel_remove(struct subchannel *);
128static int io_subchannel_notify(struct subchannel *, int);
129static void io_subchannel_verify(struct subchannel *);
130static void io_subchannel_ioterm(struct subchannel *);
131static void io_subchannel_shutdown(struct subchannel *); 129static void io_subchannel_shutdown(struct subchannel *);
130static int io_subchannel_sch_event(struct subchannel *, int);
131static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
132 int);
133
134static struct css_device_id io_subchannel_ids[] = {
135 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
136 { /* end of list */ },
137};
138MODULE_DEVICE_TABLE(css, io_subchannel_ids);
132 139
133static struct css_driver io_subchannel_driver = { 140static struct css_driver io_subchannel_driver = {
134 .owner = THIS_MODULE, 141 .owner = THIS_MODULE,
135 .subchannel_type = SUBCHANNEL_TYPE_IO, 142 .subchannel_type = io_subchannel_ids,
136 .name = "io_subchannel", 143 .name = "io_subchannel",
137 .irq = io_subchannel_irq, 144 .irq = io_subchannel_irq,
138 .notify = io_subchannel_notify, 145 .sch_event = io_subchannel_sch_event,
139 .verify = io_subchannel_verify, 146 .chp_event = io_subchannel_chp_event,
140 .termination = io_subchannel_ioterm,
141 .probe = io_subchannel_probe, 147 .probe = io_subchannel_probe,
142 .remove = io_subchannel_remove, 148 .remove = io_subchannel_remove,
143 .shutdown = io_subchannel_shutdown, 149 .shutdown = io_subchannel_shutdown,
@@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
487 ccw_device_set_online(cdev); 493 ccw_device_set_online(cdev);
488 return 0; 494 return 0;
489} 495}
490static void online_store_handle_online(struct ccw_device *cdev, int force) 496static int online_store_handle_online(struct ccw_device *cdev, int force)
491{ 497{
492 int ret; 498 int ret;
493 499
494 ret = online_store_recog_and_online(cdev); 500 ret = online_store_recog_and_online(cdev);
495 if (ret) 501 if (ret)
496 return; 502 return ret;
497 if (force && cdev->private->state == DEV_STATE_BOXED) { 503 if (force && cdev->private->state == DEV_STATE_BOXED) {
498 ret = ccw_device_stlck(cdev); 504 ret = ccw_device_stlck(cdev);
499 if (ret) { 505 if (ret)
500 dev_warn(&cdev->dev, 506 return ret;
501 "ccw_device_stlck returned %d!\n", ret);
502 return;
503 }
504 if (cdev->id.cu_type == 0) 507 if (cdev->id.cu_type == 0)
505 cdev->private->state = DEV_STATE_NOT_OPER; 508 cdev->private->state = DEV_STATE_NOT_OPER;
506 online_store_recog_and_online(cdev); 509 online_store_recog_and_online(cdev);
507 } 510 }
508 511 return 0;
509} 512}
510 513
511static ssize_t online_store (struct device *dev, struct device_attribute *attr, 514static ssize_t online_store (struct device *dev, struct device_attribute *attr,
@@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
538 ret = count; 541 ret = count;
539 break; 542 break;
540 case 1: 543 case 1:
541 online_store_handle_online(cdev, force); 544 ret = online_store_handle_online(cdev, force);
542 ret = count; 545 if (!ret)
546 ret = count;
543 break; 547 break;
544 default: 548 default:
545 ret = -EINVAL; 549 ret = -EINVAL;
@@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
584static DEVICE_ATTR(online, 0644, online_show, online_store); 588static DEVICE_ATTR(online, 0644, online_show, online_store);
585static DEVICE_ATTR(availability, 0444, available_show, NULL); 589static DEVICE_ATTR(availability, 0444, available_show, NULL);
586 590
587static struct attribute * subch_attrs[] = { 591static struct attribute *io_subchannel_attrs[] = {
588 &dev_attr_chpids.attr, 592 &dev_attr_chpids.attr,
589 &dev_attr_pimpampom.attr, 593 &dev_attr_pimpampom.attr,
590 NULL, 594 NULL,
591}; 595};
592 596
593static struct attribute_group subch_attr_group = { 597static struct attribute_group io_subchannel_attr_group = {
594 .attrs = subch_attrs, 598 .attrs = io_subchannel_attrs,
595};
596
597struct attribute_group *subch_attr_groups[] = {
598 &subch_attr_group,
599 NULL,
600}; 599};
601 600
602static struct attribute * ccwdev_attrs[] = { 601static struct attribute * ccwdev_attrs[] = {
@@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch,
790 sch_set_cdev(sch, cdev); 789 sch_set_cdev(sch, cdev);
791 cdev->private->schid = sch->schid; 790 cdev->private->schid = sch->schid;
792 cdev->ccwlock = sch->lock; 791 cdev->ccwlock = sch->lock;
793 device_trigger_reprobe(sch); 792 ccw_device_trigger_reprobe(cdev);
794 spin_unlock_irq(sch->lock); 793 spin_unlock_irq(sch->lock);
795} 794}
796 795
@@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1037 struct ccw_device_private *priv; 1036 struct ccw_device_private *priv;
1038 1037
1039 sch_set_cdev(sch, cdev); 1038 sch_set_cdev(sch, cdev);
1040 sch->driver = &io_subchannel_driver;
1041 cdev->ccwlock = sch->lock; 1039 cdev->ccwlock = sch->lock;
1042 1040
1043 /* Init private data. */ 1041 /* Init private data. */
@@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch)
1122 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1120 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1123} 1121}
1124 1122
1125static int 1123static void io_subchannel_init_fields(struct subchannel *sch)
1126io_subchannel_probe (struct subchannel *sch) 1124{
1125 if (cio_is_console(sch->schid))
1126 sch->opm = 0xff;
1127 else
1128 sch->opm = chp_get_sch_opm(sch);
1129 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1130 sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1131
1132 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1133 " - PIM = %02X, PAM = %02X, POM = %02X\n",
1134 sch->schib.pmcw.dev, sch->schid.ssid,
1135 sch->schid.sch_no, sch->schib.pmcw.pim,
1136 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1137 /* Initially set up some fields in the pmcw. */
1138 sch->schib.pmcw.ena = 0;
1139 sch->schib.pmcw.csense = 1; /* concurrent sense */
1140 if ((sch->lpm & (sch->lpm - 1)) != 0)
1141 sch->schib.pmcw.mp = 1; /* multipath mode */
1142 /* clean up possible residual cmf stuff */
1143 sch->schib.pmcw.mme = 0;
1144 sch->schib.pmcw.mbfc = 0;
1145 sch->schib.pmcw.mbi = 0;
1146 sch->schib.mba = 0;
1147}
1148
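The multipath check in io_subchannel_init_fields uses the classic more-than-one-bit-set test: x & (x - 1) clears the lowest set bit, so a non-zero result means at least two paths remain. Spelled out:

/* lpm is a bit mask of usable paths; multipath mode is only worth
 * enabling when more than one bit is set.
 */
static inline int example_has_multiple_paths(u8 lpm)
{
	return (lpm & (lpm - 1)) != 0;
}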
1149static int io_subchannel_probe(struct subchannel *sch)
1127{ 1150{
1128 struct ccw_device *cdev; 1151 struct ccw_device *cdev;
1129 int rc; 1152 int rc;
@@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch)
1132 1155
1133 cdev = sch_get_cdev(sch); 1156 cdev = sch_get_cdev(sch);
1134 if (cdev) { 1157 if (cdev) {
1158 rc = sysfs_create_group(&sch->dev.kobj,
1159 &io_subchannel_attr_group);
1160 if (rc)
1161 CIO_MSG_EVENT(0, "Failed to create io subchannel "
1162 "attributes for subchannel "
1163 "0.%x.%04x (rc=%d)\n",
1164 sch->schid.ssid, sch->schid.sch_no, rc);
1135 /* 1165 /*
1136 * This subchannel already has an associated ccw_device. 1166 * This subchannel already has an associated ccw_device.
1137 * Register it and exit. This happens for all early 1167 * Throw the delayed uevent for the subchannel, register
1138 * device, e.g. the console. 1168 * the ccw_device and exit. This happens for all early
1169 * devices, e.g. the console.
1139 */ 1170 */
1171 sch->dev.uevent_suppress = 0;
1172 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1140 cdev->dev.groups = ccwdev_attr_groups; 1173 cdev->dev.groups = ccwdev_attr_groups;
1141 device_initialize(&cdev->dev); 1174 device_initialize(&cdev->dev);
1142 ccw_device_register(cdev); 1175 ccw_device_register(cdev);
@@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch)
1152 get_device(&cdev->dev); 1185 get_device(&cdev->dev);
1153 return 0; 1186 return 0;
1154 } 1187 }
1188 io_subchannel_init_fields(sch);
1155 /* 1189 /*
1156 * First check if a fitting device may be found amongst the 1190 * First check if a fitting device may be found amongst the
1157 * disconnected devices or in the orphanage. 1191 * disconnected devices or in the orphanage.
1158 */ 1192 */
1159 dev_id.devno = sch->schib.pmcw.dev; 1193 dev_id.devno = sch->schib.pmcw.dev;
1160 dev_id.ssid = sch->schid.ssid; 1194 dev_id.ssid = sch->schid.ssid;
1195 rc = sysfs_create_group(&sch->dev.kobj,
1196 &io_subchannel_attr_group);
1197 if (rc)
1198 return rc;
1161 /* Allocate I/O subchannel private data. */ 1199 /* Allocate I/O subchannel private data. */
1162 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1200 sch->private = kzalloc(sizeof(struct io_subchannel_private),
1163 GFP_KERNEL | GFP_DMA); 1201 GFP_KERNEL | GFP_DMA);
1164 if (!sch->private) 1202 if (!sch->private) {
1165 return -ENOMEM; 1203 rc = -ENOMEM;
1204 goto out_err;
1205 }
1166 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); 1206 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1167 if (!cdev) 1207 if (!cdev)
1168 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), 1208 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
@@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch)
1181 } 1221 }
1182 cdev = io_subchannel_create_ccwdev(sch); 1222 cdev = io_subchannel_create_ccwdev(sch);
1183 if (IS_ERR(cdev)) { 1223 if (IS_ERR(cdev)) {
1184 kfree(sch->private); 1224 rc = PTR_ERR(cdev);
1185 return PTR_ERR(cdev); 1225 goto out_err;
1186 } 1226 }
1187 rc = io_subchannel_recog(cdev, sch); 1227 rc = io_subchannel_recog(cdev, sch);
1188 if (rc) { 1228 if (rc) {
@@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch)
1191 spin_unlock_irqrestore(sch->lock, flags); 1231 spin_unlock_irqrestore(sch->lock, flags);
1192 if (cdev->dev.release) 1232 if (cdev->dev.release)
1193 cdev->dev.release(&cdev->dev); 1233 cdev->dev.release(&cdev->dev);
1194 kfree(sch->private); 1234 goto out_err;
1195 } 1235 }
1196 1236 return 0;
1237out_err:
1238 kfree(sch->private);
1239 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1197 return rc; 1240 return rc;
1198} 1241}
1199 1242
@@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch)
1214 ccw_device_unregister(cdev); 1257 ccw_device_unregister(cdev);
1215 put_device(&cdev->dev); 1258 put_device(&cdev->dev);
1216 kfree(sch->private); 1259 kfree(sch->private);
1260 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1217 return 0; 1261 return 0;
1218} 1262}
1219 1263
@@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event)
1224 cdev = sch_get_cdev(sch); 1268 cdev = sch_get_cdev(sch);
1225 if (!cdev) 1269 if (!cdev)
1226 return 0; 1270 return 0;
1227 if (!cdev->drv) 1271 return ccw_device_notify(cdev, event);
1228 return 0;
1229 if (!cdev->online)
1230 return 0;
1231 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
1232} 1272}
1233 1273
1234static void io_subchannel_verify(struct subchannel *sch) 1274static void io_subchannel_verify(struct subchannel *sch)
@@ -1240,22 +1280,96 @@ static void io_subchannel_verify(struct subchannel *sch)
1240 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1280 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1241} 1281}
1242 1282
1243static void io_subchannel_ioterm(struct subchannel *sch) 1283static int check_for_io_on_path(struct subchannel *sch, int mask)
1244{ 1284{
1245 struct ccw_device *cdev; 1285 int cc;
1246 1286
1247 cdev = sch_get_cdev(sch); 1287 cc = stsch(sch->schid, &sch->schib);
1248 if (!cdev) 1288 if (cc)
1249 return; 1289 return 0;
1250 /* Internal I/O will be retried by the interrupt handler. */ 1290 if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
1251 if (cdev->private->flags.intretry) 1291 return 1;
1292 return 0;
1293}
1294
1295static void terminate_internal_io(struct subchannel *sch,
1296 struct ccw_device *cdev)
1297{
1298 if (cio_clear(sch)) {
1299 /* Recheck device in case clear failed. */
1300 sch->lpm = 0;
1301 if (cdev->online)
1302 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1303 else
1304 css_schedule_eval(sch->schid);
1252 return; 1305 return;
1306 }
1253 cdev->private->state = DEV_STATE_CLEAR_VERIFY; 1307 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
1308 /* Request retry of internal operation. */
1309 cdev->private->flags.intretry = 1;
1310 /* Call handler. */
1254 if (cdev->handler) 1311 if (cdev->handler)
1255 cdev->handler(cdev, cdev->private->intparm, 1312 cdev->handler(cdev, cdev->private->intparm,
1256 ERR_PTR(-EIO)); 1313 ERR_PTR(-EIO));
1257} 1314}
1258 1315
1316static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1317{
1318 struct ccw_device *cdev;
1319
1320 cdev = sch_get_cdev(sch);
1321 if (!cdev)
1322 return;
1323 if (check_for_io_on_path(sch, mask)) {
1324 if (cdev->private->state == DEV_STATE_ONLINE)
1325 ccw_device_kill_io(cdev);
1326 else {
1327 terminate_internal_io(sch, cdev);
1328 /* Re-start path verification. */
1329 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1330 }
1331 } else
1332 /* trigger path verification. */
1333 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1334
1335}
1336
1337static int io_subchannel_chp_event(struct subchannel *sch,
1338 struct chp_link *link, int event)
1339{
1340 int mask;
1341
1342 mask = chp_ssd_get_mask(&sch->ssd_info, link);
1343 if (!mask)
1344 return 0;
1345 switch (event) {
1346 case CHP_VARY_OFF:
1347 sch->opm &= ~mask;
1348 sch->lpm &= ~mask;
1349 io_subchannel_terminate_path(sch, mask);
1350 break;
1351 case CHP_VARY_ON:
1352 sch->opm |= mask;
1353 sch->lpm |= mask;
1354 io_subchannel_verify(sch);
1355 break;
1356 case CHP_OFFLINE:
1357 if (stsch(sch->schid, &sch->schib))
1358 return -ENXIO;
1359 if (!css_sch_is_valid(&sch->schib))
1360 return -ENODEV;
1361 io_subchannel_terminate_path(sch, mask);
1362 break;
1363 case CHP_ONLINE:
1364 if (stsch(sch->schid, &sch->schib))
1365 return -ENXIO;
1366 sch->lpm |= mask & sch->opm;
1367 io_subchannel_verify(sch);
1368 break;
1369 }
1370 return 0;
1371}
1372
1259static void 1373static void
1260io_subchannel_shutdown(struct subchannel *sch) 1374io_subchannel_shutdown(struct subchannel *sch)
1261{ 1375{
@@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch)
1285 cio_disable_subchannel(sch); 1399 cio_disable_subchannel(sch);
1286} 1400}
1287 1401
1402static int io_subchannel_get_status(struct subchannel *sch)
1403{
1404 struct schib schib;
1405
1406 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
1407 return CIO_GONE;
1408 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
1409 return CIO_REVALIDATE;
1410 if (!sch->lpm)
1411 return CIO_NO_PATH;
1412 return CIO_OPER;
1413}
1414
1415static int device_is_disconnected(struct ccw_device *cdev)
1416{
1417 if (!cdev)
1418 return 0;
1419 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1420 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1421}
1422
1423static int recovery_check(struct device *dev, void *data)
1424{
1425 struct ccw_device *cdev = to_ccwdev(dev);
1426 int *redo = data;
1427
1428 spin_lock_irq(cdev->ccwlock);
1429 switch (cdev->private->state) {
1430 case DEV_STATE_DISCONNECTED:
1431 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1432 cdev->private->dev_id.ssid,
1433 cdev->private->dev_id.devno);
1434 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1435 *redo = 1;
1436 break;
1437 case DEV_STATE_DISCONNECTED_SENSE_ID:
1438 *redo = 1;
1439 break;
1440 }
1441 spin_unlock_irq(cdev->ccwlock);
1442
1443 return 0;
1444}
1445
1446static void recovery_work_func(struct work_struct *unused)
1447{
1448 int redo = 0;
1449
1450 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1451 if (redo) {
1452 spin_lock_irq(&recovery_lock);
1453 if (!timer_pending(&recovery_timer)) {
1454 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1455 recovery_phase++;
1456 mod_timer(&recovery_timer, jiffies +
1457 recovery_delay[recovery_phase] * HZ);
1458 }
1459 spin_unlock_irq(&recovery_lock);
1460 } else
1461 CIO_MSG_EVENT(4, "recovery: end\n");
1462}
1463
1464static DECLARE_WORK(recovery_work, recovery_work_func);
1465
1466static void recovery_func(unsigned long data)
1467{
1468 /*
1469 * We can't do our recovery in softirq context and it's not
1470 * performance critical, so we schedule it.
1471 */
1472 schedule_work(&recovery_work);
1473}
1474
1475static void ccw_device_schedule_recovery(void)
1476{
1477 unsigned long flags;
1478
1479 CIO_MSG_EVENT(4, "recovery: schedule\n");
1480 spin_lock_irqsave(&recovery_lock, flags);
1481 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1482 recovery_phase = 0;
1483 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1484 }
1485 spin_unlock_irqrestore(&recovery_lock, flags);
1486}
1487
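recovery_lock, recovery_phase, recovery_timer and recovery_delay are referenced but not shown in this hunk; the delay table presumably escalates per phase (in the mainline driver it is { 3, 30, 300 } seconds). A sketch of the declarations this code relies on, using the old four-argument timer API of this era:

/* Sketch only; the actual definitions live elsewhere in device.c. */
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };	/* seconds */
static void recovery_func(unsigned long data);
static DEFINE_TIMER(recovery_timer, recovery_func, 0, 0);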
1488static void device_set_disconnected(struct ccw_device *cdev)
1489{
1490 if (!cdev)
1491 return;
1492 ccw_device_set_timeout(cdev, 0);
1493 cdev->private->flags.fake_irb = 0;
1494 cdev->private->state = DEV_STATE_DISCONNECTED;
1495 if (cdev->online)
1496 ccw_device_schedule_recovery();
1497}
1498
1499static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1500{
1501 int event, ret, disc;
1502 unsigned long flags;
1503 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
1504 struct ccw_device *cdev;
1505
1506 spin_lock_irqsave(sch->lock, flags);
1507 cdev = sch_get_cdev(sch);
1508 disc = device_is_disconnected(cdev);
1509 if (disc && slow) {
1510 /* Disconnected devices are evaluated directly only.*/
1511 spin_unlock_irqrestore(sch->lock, flags);
1512 return 0;
1513 }
1514 /* No interrupt after machine check - kill pending timers. */
1515 if (cdev)
1516 ccw_device_set_timeout(cdev, 0);
1517 if (!disc && !slow) {
1518 /* Non-disconnected devices are evaluated on the slow path. */
1519 spin_unlock_irqrestore(sch->lock, flags);
1520 return -EAGAIN;
1521 }
1522 event = io_subchannel_get_status(sch);
1523 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
1524 sch->schid.ssid, sch->schid.sch_no, event,
1525 disc ? "disconnected" : "normal",
1526 slow ? "slow" : "fast");
1527 /* Analyze subchannel status. */
1528 action = NONE;
1529 switch (event) {
1530 case CIO_NO_PATH:
1531 if (disc) {
1532 /* Check if paths have become available. */
1533 action = REPROBE;
1534 break;
1535 }
1536 /* fall through */
1537 case CIO_GONE:
1538 /* Prevent unwanted effects when opening lock. */
1539 cio_disable_subchannel(sch);
1540 device_set_disconnected(cdev);
1541 /* Ask driver what to do with device. */
1542 action = UNREGISTER;
1543 spin_unlock_irqrestore(sch->lock, flags);
1544 ret = io_subchannel_notify(sch, event);
1545 spin_lock_irqsave(sch->lock, flags);
1546 if (ret)
1547 action = NONE;
1548 break;
1549 case CIO_REVALIDATE:
1550 /* Device will be removed, so no notify necessary. */
1551 if (disc)
1552 /* Reprobe because immediate unregister might block. */
1553 action = REPROBE;
1554 else
1555 action = UNREGISTER_PROBE;
1556 break;
1557 case CIO_OPER:
1558 if (disc)
1559 /* Get device operational again. */
1560 action = REPROBE;
1561 break;
1562 }
1563 /* Perform action. */
1564 ret = 0;
1565 switch (action) {
1566 case UNREGISTER:
1567 case UNREGISTER_PROBE:
1568 /* Unregister device (will use subchannel lock). */
1569 spin_unlock_irqrestore(sch->lock, flags);
1570 css_sch_device_unregister(sch);
1571 spin_lock_irqsave(sch->lock, flags);
1572
1573 /* Reset intparm to zeroes. */
1574 sch->schib.pmcw.intparm = 0;
1575 cio_modify(sch);
1576 break;
1577 case REPROBE:
1578 ccw_device_trigger_reprobe(cdev);
1579 break;
1580 default:
1581 break;
1582 }
1583 spin_unlock_irqrestore(sch->lock, flags);
1584 /* Probe if necessary. */
1585 if (action == UNREGISTER_PROBE)
1586 ret = css_probe_device(sch->schid);
1587
1588 return ret;
1589}
1590
1288#ifdef CONFIG_CCW_CONSOLE 1591#ifdef CONFIG_CCW_CONSOLE
1289static struct ccw_device console_cdev; 1592static struct ccw_device console_cdev;
1290static struct ccw_device_private console_private; 1593static struct ccw_device_private console_private;
@@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void)
1297 return &ccw_console_lock; 1600 return &ccw_console_lock;
1298} 1601}
1299 1602
1300static int 1603static int ccw_device_console_enable(struct ccw_device *cdev,
1301ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) 1604 struct subchannel *sch)
1302{ 1605{
1303 int rc; 1606 int rc;
1304 1607
1305 /* Attach subchannel private data. */ 1608 /* Attach subchannel private data. */
1306 sch->private = cio_get_console_priv(); 1609 sch->private = cio_get_console_priv();
1307 memset(sch->private, 0, sizeof(struct io_subchannel_private)); 1610 memset(sch->private, 0, sizeof(struct io_subchannel_private));
1611 io_subchannel_init_fields(sch);
1612 sch->driver = &io_subchannel_driver;
1308 /* Initialize the ccw_device structure. */ 1613 /* Initialize the ccw_device structure. */
1309 cdev->dev.parent= &sch->dev; 1614 cdev->dev.parent= &sch->dev;
1310 rc = io_subchannel_recog(cdev, sch); 1615 rc = io_subchannel_recog(cdev, sch);
@@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
1515 return sch->schid; 1820 return sch->schid;
1516} 1821}
1517 1822
1518static int recovery_check(struct device *dev, void *data)
1519{
1520 struct ccw_device *cdev = to_ccwdev(dev);
1521 int *redo = data;
1522
1523 spin_lock_irq(cdev->ccwlock);
1524 switch (cdev->private->state) {
1525 case DEV_STATE_DISCONNECTED:
1526 CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
1527 cdev->private->dev_id.ssid,
1528 cdev->private->dev_id.devno);
1529 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1530 *redo = 1;
1531 break;
1532 case DEV_STATE_DISCONNECTED_SENSE_ID:
1533 *redo = 1;
1534 break;
1535 }
1536 spin_unlock_irq(cdev->ccwlock);
1537
1538 return 0;
1539}
1540
1541static void recovery_work_func(struct work_struct *unused)
1542{
1543 int redo = 0;
1544
1545 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1546 if (redo) {
1547 spin_lock_irq(&recovery_lock);
1548 if (!timer_pending(&recovery_timer)) {
1549 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1550 recovery_phase++;
1551 mod_timer(&recovery_timer, jiffies +
1552 recovery_delay[recovery_phase] * HZ);
1553 }
1554 spin_unlock_irq(&recovery_lock);
1555 } else
1556 CIO_MSG_EVENT(4, "recovery: end\n");
1557}
1558
1559static DECLARE_WORK(recovery_work, recovery_work_func);
1560
1561static void recovery_func(unsigned long data)
1562{
1563 /*
1564 * We can't do our recovery in softirq context and it's not
1565 * performance critical, so we schedule it.
1566 */
1567 schedule_work(&recovery_work);
1568}
1569
1570void ccw_device_schedule_recovery(void)
1571{
1572 unsigned long flags;
1573
1574 CIO_MSG_EVENT(4, "recovery: schedule\n");
1575 spin_lock_irqsave(&recovery_lock, flags);
1576 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1577 recovery_phase = 0;
1578 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1579 }
1580 spin_unlock_irqrestore(&recovery_lock, flags);
1581}
1582
1583MODULE_LICENSE("GPL"); 1823MODULE_LICENSE("GPL");
1584EXPORT_SYMBOL(ccw_device_set_online); 1824EXPORT_SYMBOL(ccw_device_set_online);
1585EXPORT_SYMBOL(ccw_device_set_offline); 1825EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index cb08092be39f..9800a8335a3f 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *);
88int ccw_device_online(struct ccw_device *); 88int ccw_device_online(struct ccw_device *);
89int ccw_device_offline(struct ccw_device *); 89int ccw_device_offline(struct ccw_device *);
90 90
91void ccw_device_schedule_recovery(void);
92
93/* Function prototypes for device status and basic sense stuff. */ 91/* Function prototypes for device status and basic sense stuff. */
94void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 92void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
95void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); 93void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
@@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *);
118 116
119int ccw_device_stlck(struct ccw_device *); 117int ccw_device_stlck(struct ccw_device *);
120 118
119/* Helper function for machine check handling. */
120void ccw_device_trigger_reprobe(struct ccw_device *);
121void ccw_device_kill_io(struct ccw_device *);
122int ccw_device_notify(struct ccw_device *, int);
123
121/* qdio needs this. */ 124/* qdio needs this. */
122void ccw_device_set_timeout(struct ccw_device *, int); 125void ccw_device_set_timeout(struct ccw_device *, int);
123extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 126extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e268d5a77c12..8b5fe57fb2f3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/device_fsm.c 2 * drivers/s390/cio/device_fsm.c
3 * finite state machine for device handling 3 * finite state machine for device handling
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */ 8 */
@@ -27,65 +26,6 @@
27 26
28static int timeout_log_enabled; 27static int timeout_log_enabled;
29 28
30int
31device_is_online(struct subchannel *sch)
32{
33 struct ccw_device *cdev;
34
35 cdev = sch_get_cdev(sch);
36 if (!cdev)
37 return 0;
38 return (cdev->private->state == DEV_STATE_ONLINE);
39}
40
41int
42device_is_disconnected(struct subchannel *sch)
43{
44 struct ccw_device *cdev;
45
46 cdev = sch_get_cdev(sch);
47 if (!cdev)
48 return 0;
49 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
50 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
51}
52
53void
54device_set_disconnected(struct subchannel *sch)
55{
56 struct ccw_device *cdev;
57
58 cdev = sch_get_cdev(sch);
59 if (!cdev)
60 return;
61 ccw_device_set_timeout(cdev, 0);
62 cdev->private->flags.fake_irb = 0;
63 cdev->private->state = DEV_STATE_DISCONNECTED;
64 if (cdev->online)
65 ccw_device_schedule_recovery();
66}
67
68void device_set_intretry(struct subchannel *sch)
69{
70 struct ccw_device *cdev;
71
72 cdev = sch_get_cdev(sch);
73 if (!cdev)
74 return;
75 cdev->private->flags.intretry = 1;
76}
77
78int device_trigger_verify(struct subchannel *sch)
79{
80 struct ccw_device *cdev;
81
82 cdev = sch_get_cdev(sch);
83 if (!cdev || !cdev->online)
84 return -EINVAL;
85 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
86 return 0;
87}
88
89static int __init ccw_timeout_log_setup(char *unused) 29static int __init ccw_timeout_log_setup(char *unused)
90{ 30{
91 timeout_log_enabled = 1; 31 timeout_log_enabled = 1;
@@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev)
99 struct schib schib; 39 struct schib schib;
100 struct subchannel *sch; 40 struct subchannel *sch;
101 struct io_subchannel_private *private; 41 struct io_subchannel_private *private;
42 union orb *orb;
102 int cc; 43 int cc;
103 44
104 sch = to_subchannel(cdev->dev.parent); 45 sch = to_subchannel(cdev->dev.parent);
105 private = to_io_private(sch); 46 private = to_io_private(sch);
47 orb = &private->orb;
106 cc = stsch(sch->schid, &schib); 48 cc = stsch(sch->schid, &schib);
107 49
108 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
109 "device information:\n", get_clock()); 51 "device information:\n", get_clock());
110 printk(KERN_WARNING "cio: orb:\n"); 52 printk(KERN_WARNING "cio: orb:\n");
111 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 53 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
112 &private->orb, sizeof(private->orb), 0); 54 orb, sizeof(*orb), 0);
113 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); 55 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
114 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); 56 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
115 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " 57 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
116 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); 58 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
117 59
118 if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || 60 if (orb->tm.b) {
119 (void *)(addr_t)private->orb.cpa == cdev->private->iccws) 61 printk(KERN_WARNING "cio: orb indicates transport mode\n");
120 printk(KERN_WARNING "cio: last channel program (intern):\n"); 62 printk(KERN_WARNING "cio: last tcw:\n");
121 else 63 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
122 printk(KERN_WARNING "cio: last channel program:\n"); 64 (void *)(addr_t)orb->tm.tcw,
123 65 sizeof(struct tcw), 0);
124 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 66 } else {
125 (void *)(addr_t)private->orb.cpa, 67 printk(KERN_WARNING "cio: orb indicates command mode\n");
126 sizeof(struct ccw1), 0); 68 if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
69 (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
70 printk(KERN_WARNING "cio: last channel program "
71 "(intern):\n");
72 else
73 printk(KERN_WARNING "cio: last channel program:\n");
74
75 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
76 (void *)(addr_t)orb->cmd.cpa,
77 sizeof(struct ccw1), 0);
78 }
127 printk(KERN_WARNING "cio: ccw device state: %d\n", 79 printk(KERN_WARNING "cio: ccw device state: %d\n",
128 cdev->private->state); 80 cdev->private->state);
129 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); 81 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
@@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
171 add_timer(&cdev->private->timer); 123 add_timer(&cdev->private->timer);
172} 124}
173 125
174/* Kill any pending timers after machine check. */
175void
176device_kill_pending_timer(struct subchannel *sch)
177{
178 struct ccw_device *cdev;
179
180 cdev = sch_get_cdev(sch);
181 if (!cdev)
182 return;
183 ccw_device_set_timeout(cdev, 0);
184}
185
186/* 126/*
187 * Cancel running i/o. This is called repeatedly since halt/clear are 127 * Cancel running i/o. This is called repeatedly since halt/clear are
188 * asynchronous operations. We do one try with cio_cancel, two tries 128 * asynchronous operations. We do one try with cio_cancel, two tries
@@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
205 /* Not operational -> done. */ 145 /* Not operational -> done. */
206 return 0; 146 return 0;
207 /* Stage 1: cancel io. */ 147 /* Stage 1: cancel io. */
208 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && 148 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
209 !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { 149 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
210 ret = cio_cancel(sch); 150 if (!scsw_is_tm(&sch->schib.scsw)) {
211 if (ret != -EINVAL) 151 ret = cio_cancel(sch);
212 return ret; 152 if (ret != -EINVAL)
213 /* cancel io unsuccessful. From now on it is asynchronous. */ 153 return ret;
154 }
155 /* cancel io unsuccessful or not applicable (transport mode).
156 * Continue with asynchronous instructions. */
214 cdev->private->iretry = 3; /* 3 halt retries. */ 157 cdev->private->iretry = 3; /* 3 halt retries. */
215 } 158 }
216 if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { 159 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
217 /* Stage 2: halt io. */ 160 /* Stage 2: halt io. */
218 if (cdev->private->iretry) { 161 if (cdev->private->iretry) {
219 cdev->private->iretry--; 162 cdev->private->iretry--;
@@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
388 } 331 }
389} 332}
390 333
334int ccw_device_notify(struct ccw_device *cdev, int event)
335{
336 if (!cdev->drv)
337 return 0;
338 if (!cdev->online)
339 return 0;
340 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
341}
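
ccw_device_notify() is now the single funnel through which path and availability events reach the device driver; a driver that has no notify callback, or returns 0, is treated as not wanting the device under the new conditions. A minimal sketch of a driver-side callback, with the function name and the decision policy purely illustrative:

        /* Sketch of a ccw_driver .notify callback (name hypothetical). */
        static int sample_notify(struct ccw_device *cdev, int event)
        {
                switch (event) {
                case CIO_GONE:          /* device vanished */
                case CIO_NO_PATH:       /* all paths gone */
                        return 1;       /* keep the device, wait for recovery */
                case CIO_OPER:          /* device operational again */
                        return 1;       /* accept it back */
                default:
                        return 0;
                }
        }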
342
391static void 343static void
392ccw_device_oper_notify(struct work_struct *work) 344ccw_device_oper_notify(struct work_struct *work)
393{ 345{
394 struct ccw_device_private *priv; 346 struct ccw_device_private *priv;
395 struct ccw_device *cdev; 347 struct ccw_device *cdev;
396 struct subchannel *sch;
397 int ret; 348 int ret;
398 unsigned long flags;
399 349
400 priv = container_of(work, struct ccw_device_private, kick_work); 350 priv = container_of(work, struct ccw_device_private, kick_work);
401 cdev = priv->cdev; 351 cdev = priv->cdev;
402 spin_lock_irqsave(cdev->ccwlock, flags); 352 ret = ccw_device_notify(cdev, CIO_OPER);
403 sch = to_subchannel(cdev->dev.parent);
404 if (sch->driver && sch->driver->notify) {
405 spin_unlock_irqrestore(cdev->ccwlock, flags);
406 ret = sch->driver->notify(sch, CIO_OPER);
407 spin_lock_irqsave(cdev->ccwlock, flags);
408 } else
409 ret = 0;
410 if (ret) { 353 if (ret) {
411 /* Reenable channel measurements, if needed. */ 354 /* Reenable channel measurements, if needed. */
412 spin_unlock_irqrestore(cdev->ccwlock, flags);
413 cmf_reenable(cdev); 355 cmf_reenable(cdev);
414 spin_lock_irqsave(cdev->ccwlock, flags);
415 wake_up(&cdev->private->wait_q); 356 wake_up(&cdev->private->wait_q);
416 } 357 } else
417 spin_unlock_irqrestore(cdev->ccwlock, flags);
418 if (!ret)
419 /* Driver doesn't want device back. */ 358 /* Driver doesn't want device back. */
420 ccw_device_do_unreg_rereg(work); 359 ccw_device_do_unreg_rereg(work);
421} 360}
@@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
621 /* Deliver fake irb to device driver, if needed. */ 560 /* Deliver fake irb to device driver, if needed. */
622 if (cdev->private->flags.fake_irb) { 561 if (cdev->private->flags.fake_irb) {
623 memset(&cdev->private->irb, 0, sizeof(struct irb)); 562 memset(&cdev->private->irb, 0, sizeof(struct irb));
624 cdev->private->irb.scsw.cc = 1; 563 cdev->private->irb.scsw.cmd.cc = 1;
625 cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; 564 cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
626 cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; 565 cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
627 cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; 566 cdev->private->irb.scsw.cmd.stctl =
567 SCSW_STCTL_STATUS_PEND;
628 cdev->private->flags.fake_irb = 0; 568 cdev->private->flags.fake_irb = 0;
629 if (cdev->handler) 569 if (cdev->handler)
630 cdev->handler(cdev, cdev->private->intparm, 570 cdev->handler(cdev, cdev->private->intparm,
@@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev)
718 sch = to_subchannel(cdev->dev.parent); 658 sch = to_subchannel(cdev->dev.parent);
719 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) 659 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
720 return -ENODEV; 660 return -ENODEV;
721 if (cdev->private->state != DEV_STATE_ONLINE) { 661 if (scsw_actl(&sch->schib.scsw) != 0)
722 if (sch->schib.scsw.actl != 0)
723 return -EBUSY;
724 return -EINVAL;
725 }
726 if (sch->schib.scsw.actl != 0)
727 return -EBUSY; 662 return -EBUSY;
663 if (cdev->private->state != DEV_STATE_ONLINE)
664 return -EINVAL;
728 /* Are we doing path grouping? */ 665 /* Are we doing path grouping? */
729 if (!cdev->private->options.pgroup) { 666 if (!cdev->private->options.pgroup) {
730 /* No, set state offline immediately. */ 667 /* No, set state offline immediately. */
@@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
799 */ 736 */
800 stsch(sch->schid, &sch->schib); 737 stsch(sch->schid, &sch->schib);
801 738
802 if (sch->schib.scsw.actl != 0 || 739 if (scsw_actl(&sch->schib.scsw) != 0 ||
803 (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || 740 (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
804 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { 741 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
805 /* 742 /*
806 * No final status yet or final status not yet delivered 743 * No final status yet or final status not yet delivered
807 * to the device driver. Can't do path verification now, 744 * to the device driver. Can't do path verification now,
@@ -823,13 +760,13 @@ static void
823ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) 760ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
824{ 761{
825 struct irb *irb; 762 struct irb *irb;
763 int is_cmd;
826 764
827 irb = (struct irb *) __LC_IRB; 765 irb = (struct irb *) __LC_IRB;
766 is_cmd = !scsw_is_tm(&irb->scsw);
828 /* Check for unsolicited interrupt. */ 767 /* Check for unsolicited interrupt. */
829 if ((irb->scsw.stctl == 768 if (!scsw_is_solicited(&irb->scsw)) {
830 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) 769 if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
831 && (!irb->scsw.cc)) {
832 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
833 !irb->esw.esw0.erw.cons) { 770 !irb->esw.esw0.erw.cons) {
834 /* Unit check but no sense data. Need basic sense. */ 771 /* Unit check but no sense data. Need basic sense. */
835 if (ccw_device_do_sense(cdev, irb) != 0) 772 if (ccw_device_do_sense(cdev, irb) != 0)
@@ -848,7 +785,7 @@ call_handler_unsol:
848 } 785 }
849 /* Accumulate status and find out if a basic sense is needed. */ 786 /* Accumulate status and find out if a basic sense is needed. */
850 ccw_device_accumulate_irb(cdev, irb); 787 ccw_device_accumulate_irb(cdev, irb);
851 if (cdev->private->flags.dosense) { 788 if (is_cmd && cdev->private->flags.dosense) {
852 if (ccw_device_do_sense(cdev, irb) == 0) { 789 if (ccw_device_do_sense(cdev, irb) == 0) {
853 cdev->private->state = DEV_STATE_W4SENSE; 790 cdev->private->state = DEV_STATE_W4SENSE;
854 } 791 }
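
The scsw_actl()/scsw_stctl()/scsw_is_solicited() calls that replace direct field access throughout this hunk exist because the scsw is now a union of a command-mode and a transport-mode layout whose bit positions differ. The real helpers live in the new scsw.c; a sketch of the dispatch pattern they follow (illustrative, not the actual implementation):

        /* Return the activity-control field for either scsw flavour. */
        static u32 sample_scsw_actl(union scsw *scsw)
        {
                if (scsw_is_tm(scsw))           /* transport-mode scsw? */
                        return scsw->tm.actl;
                return scsw->cmd.actl;
        }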
@@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
892 829
893 irb = (struct irb *) __LC_IRB; 830 irb = (struct irb *) __LC_IRB;
894 /* Check for unsolicited interrupt. */ 831 /* Check for unsolicited interrupt. */
895 if (irb->scsw.stctl == 832 if (scsw_stctl(&irb->scsw) ==
896 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 833 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
897 if (irb->scsw.cc == 1) 834 if (scsw_cc(&irb->scsw) == 1)
898 /* Basic sense hasn't started. Try again. */ 835 /* Basic sense hasn't started. Try again. */
899 ccw_device_do_sense(cdev, irb); 836 ccw_device_do_sense(cdev, irb);
900 else { 837 else {
@@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
912 * only deliver the halt/clear interrupt to the device driver as if it 849 * only deliver the halt/clear interrupt to the device driver as if it
913 * had killed the original request. 850 * had killed the original request.
914 */ 851 */
915 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 852 if (scsw_fctl(&irb->scsw) &
853 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
916 /* Retry Basic Sense if requested. */ 854 /* Retry Basic Sense if requested. */
917 if (cdev->private->flags.intretry) { 855 if (cdev->private->flags.intretry) {
918 cdev->private->flags.intretry = 0; 856 cdev->private->flags.intretry = 0;
@@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
986 ERR_PTR(-EIO)); 924 ERR_PTR(-EIO));
987} 925}
988 926
989void device_kill_io(struct subchannel *sch) 927void ccw_device_kill_io(struct ccw_device *cdev)
990{ 928{
991 int ret; 929 int ret;
992 struct ccw_device *cdev;
993 930
994 cdev = sch_get_cdev(sch);
995 ret = ccw_device_cancel_halt_clear(cdev); 931 ret = ccw_device_cancel_halt_clear(cdev);
996 if (ret == -EBUSY) { 932 if (ret == -EBUSY) {
997 ccw_device_set_timeout(cdev, 3*HZ); 933 ccw_device_set_timeout(cdev, 3*HZ);
@@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1021 case DEV_EVENT_INTERRUPT: 957 case DEV_EVENT_INTERRUPT:
1022 irb = (struct irb *) __LC_IRB; 958 irb = (struct irb *) __LC_IRB;
1023 /* Check for unsolicited interrupt. */ 959 /* Check for unsolicited interrupt. */
1024 if ((irb->scsw.stctl == 960 if ((scsw_stctl(&irb->scsw) ==
1025 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && 961 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1026 (!irb->scsw.cc)) 962 (!scsw_cc(&irb->scsw)))
1027 /* FIXME: we should restart stlck here, but this 963 /* FIXME: we should restart stlck here, but this
1028 * is extremely unlikely ... */ 964 * is extremely unlikely ... */
1029 goto out_wakeup; 965 goto out_wakeup;
@@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1055 ccw_device_sense_id_start(cdev); 991 ccw_device_sense_id_start(cdev);
1056} 992}
1057 993
1058void 994void ccw_device_trigger_reprobe(struct ccw_device *cdev)
1059device_trigger_reprobe(struct subchannel *sch)
1060{ 995{
1061 struct ccw_device *cdev; 996 struct subchannel *sch;
1062 997
1063 cdev = sch_get_cdev(sch);
1064 if (!cdev)
1065 return;
1066 if (cdev->private->state != DEV_STATE_DISCONNECTED) 998 if (cdev->private->state != DEV_STATE_DISCONNECTED)
1067 return; 999 return;
1068 1000
1001 sch = to_subchannel(cdev->dev.parent);
1069 /* Update some values. */ 1002 /* Update some values. */
1070 if (stsch(sch->schid, &sch->schib)) 1003 if (stsch(sch->schid, &sch->schib))
1071 return; 1004 return;
@@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch)
1081 sch->schib.pmcw.ena = 0; 1014 sch->schib.pmcw.ena = 0;
1082 if ((sch->lpm & (sch->lpm - 1)) != 0) 1015 if ((sch->lpm & (sch->lpm - 1)) != 0)
1083 sch->schib.pmcw.mp = 1; 1016 sch->schib.pmcw.mp = 1;
1084 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
1085 /* We should also update ssd info, but this has to wait. */ 1017 /* We should also update ssd info, but this has to wait. */
1086 /* Check if this is another device which appeared on the same sch. */ 1018 /* Check if this is another device which appeared on the same sch. */
1087 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1019 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index cba7020517ed..1bdaa614e34f 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
196 irb = &cdev->private->irb; 196 irb = &cdev->private->irb;
197 197
198 /* Check the error cases. */ 198 /* Check the error cases. */
199 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 199 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
200 /* Retry Sense ID if requested. */ 200 /* Retry Sense ID if requested. */
201 if (cdev->private->flags.intretry) { 201 if (cdev->private->flags.intretry) {
202 cdev->private->flags.intretry = 0; 202 cdev->private->flags.intretry = 0;
@@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
234 irb->ecw[6], irb->ecw[7]); 234 irb->ecw[6], irb->ecw[7]);
235 return -EAGAIN; 235 return -EAGAIN;
236 } 236 }
237 if (irb->scsw.cc == 3) { 237 if (irb->scsw.cmd.cc == 3) {
238 u8 lpm; 238 u8 lpm;
239 239
240 lpm = to_io_private(sch)->orb.lpm; 240 lpm = to_io_private(sch)->orb.cmd.lpm;
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " 242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is " 243 "on subchannel 0.%x.%04x is "
@@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
248 } 248 }
249 249
250 /* Did we get a proper answer ? */ 250 /* Did we get a proper answer ? */
251 if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && 251 if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
252 cdev->private->senseid.reserved == 0xFF) { 252 cdev->private->senseid.reserved == 0xFF) {
253 if (irb->scsw.count < sizeof(struct senseid) - 8) 253 if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
254 cdev->private->flags.esid = 1; 254 cdev->private->flags.esid = 1;
255 return 0; /* Success */ 255 return 0; /* Success */
256 } 256 }
@@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
260 "subchannel 0.%x.%04x returns status %02X%02X\n", 260 "subchannel 0.%x.%04x returns status %02X%02X\n",
261 cdev->private->dev_id.devno, sch->schid.ssid, 261 cdev->private->dev_id.devno, sch->schid.ssid,
262 sch->schid.sch_no, 262 sch->schid.sch_no,
263 irb->scsw.dstat, irb->scsw.cstat); 263 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
264 return -EAGAIN; 264 return -EAGAIN;
265} 265}
266 266
@@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
277 sch = to_subchannel(cdev->dev.parent); 277 sch = to_subchannel(cdev->dev.parent);
278 irb = (struct irb *) __LC_IRB; 278 irb = (struct irb *) __LC_IRB;
279 /* Retry sense id, if needed. */ 279 /* Retry sense id, if needed. */
280 if (irb->scsw.stctl == 280 if (irb->scsw.cmd.stctl ==
281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
282 if ((irb->scsw.cc == 1) || !irb->scsw.actl) { 282 if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
283 ret = __ccw_device_sense_id_start(cdev); 283 ret = __ccw_device_sense_id_start(cdev);
284 if (ret && ret != -EBUSY) 284 if (ret && ret != -EBUSY)
285 ccw_device_sense_id_done(cdev, ret); 285 ccw_device_sense_id_done(cdev, ret);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f308ad55a6d5..ee1a28310fbb 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -17,6 +17,7 @@
17#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
18#include <asm/idals.h> 18#include <asm/idals.h>
19#include <asm/chpid.h> 19#include <asm/chpid.h>
20#include <asm/fcx.h>
20 21
21#include "cio.h" 22#include "cio.h"
22#include "cio_debug.h" 23#include "cio_debug.h"
@@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
179 return -EBUSY; 180 return -EBUSY;
180 } 181 }
181 if (cdev->private->state != DEV_STATE_ONLINE || 182 if (cdev->private->state != DEV_STATE_ONLINE ||
182 ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && 183 ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
183 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || 184 !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
184 cdev->private->flags.doverify) 185 cdev->private->flags.doverify)
185 return -EBUSY; 186 return -EBUSY;
186 ret = cio_set_options (sch, flags); 187 ret = cio_set_options (sch, flags);
@@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev)
379 if (cdev->private->state == DEV_STATE_NOT_OPER) 380 if (cdev->private->state == DEV_STATE_NOT_OPER)
380 return -ENODEV; 381 return -ENODEV;
381 if (cdev->private->state != DEV_STATE_ONLINE || 382 if (cdev->private->state != DEV_STATE_ONLINE ||
382 !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) 383 !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
383 return -EINVAL; 384 return -EINVAL;
384 return cio_resume(sch); 385 return cio_resume(sch);
385} 386}
@@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev)
404 * - fast notification was requested (primary status) 405 * - fast notification was requested (primary status)
405 * - unsolicited interrupts 406 * - unsolicited interrupts
406 */ 407 */
407 stctl = cdev->private->irb.scsw.stctl; 408 stctl = scsw_stctl(&cdev->private->irb.scsw);
408 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 409 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
409 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 410 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
410 (stctl == SCSW_STCTL_STATUS_PEND); 411 (stctl == SCSW_STCTL_STATUS_PEND);
@@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev)
528 cio_disable_subchannel(sch); //FIXME: return code? 529 cio_disable_subchannel(sch); //FIXME: return code?
529 goto out_unlock; 530 goto out_unlock;
530 } 531 }
531 cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; 532 cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
532 spin_unlock_irqrestore(sch->lock, flags); 533 spin_unlock_irqrestore(sch->lock, flags);
533 wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); 534 wait_event(cdev->private->wait_q,
535 cdev->private->irb.scsw.cmd.actl == 0);
534 spin_lock_irqsave(sch->lock, flags); 536 spin_lock_irqsave(sch->lock, flags);
535 cio_disable_subchannel(sch); //FIXME: return code? 537 cio_disable_subchannel(sch); //FIXME: return code?
536 if ((cdev->private->irb.scsw.dstat != 538 if ((cdev->private->irb.scsw.cmd.dstat !=
537 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || 539 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
538 (cdev->private->irb.scsw.cstat != 0)) 540 (cdev->private->irb.scsw.cmd.cstat != 0))
539 ret = -EIO; 541 ret = -EIO;
540 /* Clear irb. */ 542 /* Clear irb. */
541 memset(&cdev->private->irb, 0, sizeof(struct irb)); 543 memset(&cdev->private->irb, 0, sizeof(struct irb));
@@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
568} 570}
569EXPORT_SYMBOL(ccw_device_get_id); 571EXPORT_SYMBOL(ccw_device_get_id);
570 572
573/**
574 * ccw_device_tm_start_key - perform start function
575 * @cdev: ccw device on which to perform the start function
576 * @tcw: transport-command word to be started
577 * @intparm: user defined parameter to be passed to the interrupt handler
578 * @lpm: mask of paths to use
579 * @key: storage key to use for storage access
580 *
581 * Start the tcw on the given ccw device. Return zero on success, non-zero
582 * otherwise.
583 */
584int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
585 unsigned long intparm, u8 lpm, u8 key)
586{
587 struct subchannel *sch;
588 int rc;
589
590 sch = to_subchannel(cdev->dev.parent);
591 if (cdev->private->state != DEV_STATE_ONLINE)
592 return -EIO;
593 /* Adjust requested path mask to excluded varied off paths. */
594 if (lpm) {
595 lpm &= sch->opm;
596 if (lpm == 0)
597 return -EACCES;
598 }
599 rc = cio_tm_start_key(sch, tcw, lpm, key);
600 if (rc == 0)
601 cdev->private->intparm = intparm;
602 return rc;
603}
604EXPORT_SYMBOL(ccw_device_tm_start_key);
605
606/**
607 * ccw_device_tm_start_timeout_key - perform start function
608 * @cdev: ccw device on which to perform the start function
609 * @tcw: transport-command word to be started
610 * @intparm: user defined parameter to be passed to the interrupt handler
611 * @lpm: mask of paths to use
612 * @key: storage key to use for storage access
613 * @expires: time span in jiffies after which to abort request
614 *
615 * Start the tcw on the given ccw device. Return zero on success, non-zero
616 * otherwise.
617 */
618int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
619 unsigned long intparm, u8 lpm, u8 key,
620 int expires)
621{
622 int ret;
623
624 ccw_device_set_timeout(cdev, expires);
625 ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
626 if (ret != 0)
627 ccw_device_set_timeout(cdev, 0);
628 return ret;
629}
630EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
631
632/**
633 * ccw_device_tm_start - perform start function
634 * @cdev: ccw device on which to perform the start function
635 * @tcw: transport-command word to be started
636 * @intparm: user defined parameter to be passed to the interrupt handler
637 * @lpm: mask of paths to use
638 *
639 * Start the tcw on the given ccw device. Return zero on success, non-zero
640 * otherwise.
641 */
642int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
643 unsigned long intparm, u8 lpm)
644{
645 return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
646 PAGE_DEFAULT_KEY);
647}
648EXPORT_SYMBOL(ccw_device_tm_start);
649
650/**
651 * ccw_device_tm_start_timeout - perform start function
652 * @cdev: ccw device on which to perform the start function
653 * @tcw: transport-command word to be started
654 * @intparm: user defined parameter to be passed to the interrupt handler
655 * @lpm: mask of paths to use
656 * @expires: time span in jiffies after which to abort request
657 *
658 * Start the tcw on the given ccw device. Return zero on success, non-zero
659 * otherwise.
660 */
661int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
662 unsigned long intparm, u8 lpm, int expires)
663{
664 return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
665 PAGE_DEFAULT_KEY, expires);
666}
667EXPORT_SYMBOL(ccw_device_tm_start_timeout);
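
All four start wrappers funnel into ccw_device_tm_start_key(); they differ only in whether a storage key other than PAGE_DEFAULT_KEY and a watchdog timeout are supplied. A minimal sketch of a caller, assuming a tcw that was already assembled (for instance with the fcx helpers added below) and using the tcw address itself as the hypothetical intparm token:

        /* Sketch only: start a prepared transport-mode request. */
        static int sample_start_tm(struct ccw_device *cdev, struct tcw *tcw)
        {
                /* lpm 0 = no path restriction; the 10s timeout is an assumed value. */
                return ccw_device_tm_start_timeout(cdev, tcw,
                                                   (unsigned long) tcw, 0, 10 * HZ);
        }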
668
669/**
670 * ccw_device_tm_intrg - perform interrogate function
671 * @cdev: ccw device on which to perform the interrogate function
672 *
673 * Perform an interrogate function on the given ccw device. Return zero on
674 * success, non-zero otherwise.
675 */
676int ccw_device_tm_intrg(struct ccw_device *cdev)
677{
678 struct subchannel *sch = to_subchannel(cdev->dev.parent);
679
680 if (cdev->private->state != DEV_STATE_ONLINE)
681 return -EIO;
682 if (!scsw_is_tm(&sch->schib.scsw) ||
683 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
684 return -EINVAL;
685 return cio_tm_intrg(sch);
686}
687EXPORT_SYMBOL(ccw_device_tm_intrg);
688
571// FIXME: these have to go: 689// FIXME: these have to go:
572 690
573int 691int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 5cf7be008e98..86bc94eb607f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -28,13 +28,13 @@
28 * Helper function called from interrupt context to decide whether an 28 * Helper function called from interrupt context to decide whether an
29 * operation should be tried again. 29 * operation should be tried again.
30 */ 30 */
31static int __ccw_device_should_retry(struct scsw *scsw) 31static int __ccw_device_should_retry(union scsw *scsw)
32{ 32{
33 /* CC is only valid if start function bit is set. */ 33 /* CC is only valid if start function bit is set. */
34 if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) 34 if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
35 return 1; 35 return 1;
36 /* No more activity. For sense and set PGID we stubbornly try again. */ 36 /* No more activity. For sense and set PGID we stubbornly try again. */
37 if (!scsw->actl) 37 if (!scsw->cmd.actl)
38 return 1; 38 return 1;
39 return 0; 39 return 0;
40} 40}
@@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
125 125
126 sch = to_subchannel(cdev->dev.parent); 126 sch = to_subchannel(cdev->dev.parent);
127 irb = &cdev->private->irb; 127 irb = &cdev->private->irb;
128 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 128 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
129 /* Retry Sense PGID if requested. */ 129 /* Retry Sense PGID if requested. */
130 if (cdev->private->flags.intretry) { 130 if (cdev->private->flags.intretry) {
131 cdev->private->flags.intretry = 0; 131 cdev->private->flags.intretry = 0;
@@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
155 irb->ecw[6], irb->ecw[7]); 155 irb->ecw[6], irb->ecw[7]);
156 return -EAGAIN; 156 return -EAGAIN;
157 } 157 }
158 if (irb->scsw.cc == 3) { 158 if (irb->scsw.cmd.cc == 3) {
159 u8 lpm; 159 u8 lpm;
160 160
161 lpm = to_io_private(sch)->orb.lpm; 161 lpm = to_io_private(sch)->orb.cmd.lpm;
162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," 162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n", 163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid, 164 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
188 188
189 irb = (struct irb *) __LC_IRB; 189 irb = (struct irb *) __LC_IRB;
190 190
191 if (irb->scsw.stctl == 191 if (irb->scsw.cmd.stctl ==
192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
193 if (__ccw_device_should_retry(&irb->scsw)) { 193 if (__ccw_device_should_retry(&irb->scsw)) {
194 ret = __ccw_device_sense_pgid_start(cdev); 194 ret = __ccw_device_sense_pgid_start(cdev);
@@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
331 331
332 sch = to_subchannel(cdev->dev.parent); 332 sch = to_subchannel(cdev->dev.parent);
333 irb = &cdev->private->irb; 333 irb = &cdev->private->irb;
334 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 334 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
335 /* Retry Set PGID if requested. */ 335 /* Retry Set PGID if requested. */
336 if (cdev->private->flags.intretry) { 336 if (cdev->private->flags.intretry) {
337 cdev->private->flags.intretry = 0; 337 cdev->private->flags.intretry = 0;
@@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
355 irb->ecw[6], irb->ecw[7]); 355 irb->ecw[6], irb->ecw[7]);
356 return -EAGAIN; 356 return -EAGAIN;
357 } 357 }
358 if (irb->scsw.cc == 3) { 358 if (irb->scsw.cmd.cc == 3) {
359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," 359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
360 " lpm %02X, became 'not operational'\n", 360 " lpm %02X, became 'not operational'\n",
361 cdev->private->dev_id.devno, sch->schid.ssid, 361 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
376 376
377 sch = to_subchannel(cdev->dev.parent); 377 sch = to_subchannel(cdev->dev.parent);
378 irb = &cdev->private->irb; 378 irb = &cdev->private->irb;
379 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 379 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
380 /* Retry NOP if requested. */ 380 /* Retry NOP if requested. */
381 if (cdev->private->flags.intretry) { 381 if (cdev->private->flags.intretry) {
382 cdev->private->flags.intretry = 0; 382 cdev->private->flags.intretry = 0;
@@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
384 } 384 }
385 return -ETIME; 385 return -ETIME;
386 } 386 }
387 if (irb->scsw.cc == 3) { 387 if (irb->scsw.cmd.cc == 3) {
388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," 388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
389 " lpm %02X, became 'not operational'\n", 389 " lpm %02X, became 'not operational'\n",
390 cdev->private->dev_id.devno, sch->schid.ssid, 390 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
438 438
439 irb = (struct irb *) __LC_IRB; 439 irb = (struct irb *) __LC_IRB;
440 440
441 if (irb->scsw.stctl == 441 if (irb->scsw.cmd.stctl ==
442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
443 if (__ccw_device_should_retry(&irb->scsw)) 443 if (__ccw_device_should_retry(&irb->scsw))
444 __ccw_device_verify_start(cdev); 444 __ccw_device_verify_start(cdev);
@@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
544 544
545 irb = (struct irb *) __LC_IRB; 545 irb = (struct irb *) __LC_IRB;
546 546
547 if (irb->scsw.stctl == 547 if (irb->scsw.cmd.stctl ==
548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
549 if (__ccw_device_should_retry(&irb->scsw)) 549 if (__ccw_device_should_retry(&irb->scsw))
550 __ccw_device_disband_start(cdev); 550 __ccw_device_disband_start(cdev);
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 4a38993000f2..1b03c5423be2 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -29,9 +29,11 @@
29static void 29static void
30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
31{ 31{
32 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 32 char dbf_text[15];
33 SCHN_STAT_CHN_CTRL_CHK | 33
34 SCHN_STAT_INTF_CTRL_CHK))) 34 if (!scsw_is_valid_cstat(&irb->scsw) ||
35 !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
36 SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
35 return; 37 return;
36 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " 38 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
37 "received" 39 "received"
@@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
39 ": %02X sch_stat : %02X\n", 41 ": %02X sch_stat : %02X\n",
40 cdev->private->dev_id.devno, cdev->private->schid.ssid, 42 cdev->private->dev_id.devno, cdev->private->schid.ssid,
41 cdev->private->schid.sch_no, 43 cdev->private->schid.sch_no,
42 irb->scsw.dstat, irb->scsw.cstat); 44 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
43 45 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
44 if (irb->scsw.cc != 3) { 46 CIO_TRACE_EVENT(0, dbf_text);
45 char dbf_text[15]; 47 CIO_HEX_EVENT(0, irb, sizeof(struct irb));
46
47 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
48 CIO_TRACE_EVENT(0, dbf_text);
49 CIO_HEX_EVENT(0, irb, sizeof (struct irb));
50 }
51} 48}
52 49
53/* 50/*
@@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
81 * are conditions that have to be met for the extended control 78 * are conditions that have to be met for the extended control
82 * bit to have meaning. Sick. 79 * bit to have meaning. Sick.
83 */ 80 */
84 cdev->private->irb.scsw.ectl = 0; 81 cdev->private->irb.scsw.cmd.ectl = 0;
85 if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && 82 if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
86 !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) 83 !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
87 cdev->private->irb.scsw.ectl = irb->scsw.ectl; 84 cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
88 /* Check if extended control word is valid. */ 85 /* Check if extended control word is valid. */
89 if (!cdev->private->irb.scsw.ectl) 86 if (!cdev->private->irb.scsw.cmd.ectl)
90 return; 87 return;
91 /* Copy concurrent sense / model dependent information. */ 88 /* Copy concurrent sense / model dependent information. */
92 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); 89 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
@@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
98static int 95static int
99ccw_device_accumulate_esw_valid(struct irb *irb) 96ccw_device_accumulate_esw_valid(struct irb *irb)
100{ 97{
101 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) 98 if (!irb->scsw.cmd.eswf &&
99 (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
102 return 0; 100 return 0;
103 if (irb->scsw.stctl == 101 if (irb->scsw.cmd.stctl ==
104 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && 102 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
105 !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) 103 !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
106 return 0; 104 return 0;
107 return 1; 105 return 1;
108} 106}
@@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
125 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; 123 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
126 124
127 /* Copy subchannel logout information if esw is of format 0. */ 125 /* Copy subchannel logout information if esw is of format 0. */
128 if (irb->scsw.eswf) { 126 if (irb->scsw.cmd.eswf) {
129 cdev_sublog = &cdev_irb->esw.esw0.sublog; 127 cdev_sublog = &cdev_irb->esw.esw0.sublog;
130 sublog = &irb->esw.esw0.sublog; 128 sublog = &irb->esw.esw0.sublog;
131 /* Copy extended status flags. */ 129 /* Copy extended status flags. */
@@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
134 * Copy fields that have a meaning for channel data check 132 * Copy fields that have a meaning for channel data check
135 * channel control check and interface control check. 133 * channel control check and interface control check.
136 */ 134 */
137 if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 135 if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
138 SCHN_STAT_CHN_CTRL_CHK | 136 SCHN_STAT_CHN_CTRL_CHK |
139 SCHN_STAT_INTF_CTRL_CHK)) { 137 SCHN_STAT_INTF_CTRL_CHK)) {
140 /* Copy ancillary report bit. */ 138 /* Copy ancillary report bit. */
@@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
155 /* Copy i/o-error alert. */ 153 /* Copy i/o-error alert. */
156 cdev_sublog->ioerr = sublog->ioerr; 154 cdev_sublog->ioerr = sublog->ioerr;
157 /* Copy channel path timeout bit. */ 155 /* Copy channel path timeout bit. */
158 if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) 156 if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
159 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; 157 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
160 /* Copy failing storage address validity flag. */ 158 /* Copy failing storage address validity flag. */
161 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; 159 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
@@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
200 * If not, the remaining bits have no meaning and we must ignore them. 198 * If not, the remaining bits have no meaning and we must ignore them.
201 * The esw is not meaningful either... 199 * The esw is not meaningful either...
202 */ 200 */
203 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) 201 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
204 return; 202 return;
205 203
206 /* Check for channel checks and interface control checks. */ 204 /* Check for channel checks and interface control checks. */
207 ccw_device_msg_control_check(cdev, irb); 205 ccw_device_msg_control_check(cdev, irb);
208 206
209 /* Check for path not operational. */ 207 /* Check for path not operational. */
210 if (irb->scsw.pno && irb->scsw.fctl != 0 && 208 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
211 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
212 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
213 ccw_device_path_notoper(cdev); 209 ccw_device_path_notoper(cdev);
214 210 /* No irb accumulation for transport mode irbs. */
211 if (scsw_is_tm(&irb->scsw)) {
212 memcpy(&cdev->private->irb, irb, sizeof(struct irb));
213 return;
214 }
215 /* 215 /*
216 * Don't accumulate unsolicited interrupts. 216 * Don't accumulate unsolicited interrupts.
217 */ 217 */
218 if ((irb->scsw.stctl == 218 if (!scsw_is_solicited(&irb->scsw))
219 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
220 (!irb->scsw.cc))
221 return; 219 return;
222 220
223 cdev_irb = &cdev->private->irb; 221 cdev_irb = &cdev->private->irb;
@@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
227 * status at the subchannel has been cleared and we must not pass 225 * status at the subchannel has been cleared and we must not pass
228 * intermediate accumulated status to the device driver. 226 * intermediate accumulated status to the device driver.
229 */ 227 */
230 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) 228 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
231 memset(&cdev->private->irb, 0, sizeof(struct irb)); 229 memset(&cdev->private->irb, 0, sizeof(struct irb));
232 230
233 /* Copy bits which are valid only for the start function. */ 231 /* Copy bits which are valid only for the start function. */
234 if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { 232 if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
235 /* Copy key. */ 233 /* Copy key. */
236 cdev_irb->scsw.key = irb->scsw.key; 234 cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
237 /* Copy suspend control bit. */ 235 /* Copy suspend control bit. */
238 cdev_irb->scsw.sctl = irb->scsw.sctl; 236 cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
239 /* Accumulate deferred condition code. */ 237 /* Accumulate deferred condition code. */
240 cdev_irb->scsw.cc |= irb->scsw.cc; 238 cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
241 /* Copy ccw format bit. */ 239 /* Copy ccw format bit. */
242 cdev_irb->scsw.fmt = irb->scsw.fmt; 240 cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
243 /* Copy prefetch bit. */ 241 /* Copy prefetch bit. */
244 cdev_irb->scsw.pfch = irb->scsw.pfch; 242 cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
245 /* Copy initial-status-interruption-control. */ 243 /* Copy initial-status-interruption-control. */
246 cdev_irb->scsw.isic = irb->scsw.isic; 244 cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
247 /* Copy address limit checking control. */ 245 /* Copy address limit checking control. */
248 cdev_irb->scsw.alcc = irb->scsw.alcc; 246 cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
249 /* Copy suppress suspend bit. */ 247 /* Copy suppress suspend bit. */
250 cdev_irb->scsw.ssi = irb->scsw.ssi; 248 cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
251 } 249 }
252 250
253 /* Take care of the extended control bit and extended control word. */ 251 /* Take care of the extended control bit and extended control word. */
254 ccw_device_accumulate_ecw(cdev, irb); 252 ccw_device_accumulate_ecw(cdev, irb);
255 253
256 /* Accumulate function control. */ 254 /* Accumulate function control. */
257 cdev_irb->scsw.fctl |= irb->scsw.fctl; 255 cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
258 /* Copy activity control. */ 256 /* Copy activity control. */
259 cdev_irb->scsw.actl= irb->scsw.actl; 257 cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
260 /* Accumulate status control. */ 258 /* Accumulate status control. */
261 cdev_irb->scsw.stctl |= irb->scsw.stctl; 259 cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
262 /* 260 /*
263 * Copy ccw address if it is valid. This is a bit simplified 261 * Copy ccw address if it is valid. This is a bit simplified
264 * but should be close enough for all practical purposes. 262 * but should be close enough for all practical purposes.
265 */ 263 */
266 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || 264 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
267 ((irb->scsw.stctl == 265 ((irb->scsw.cmd.stctl ==
268 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && 266 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
269 (irb->scsw.actl & SCSW_ACTL_DEVACT) && 267 (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
270 (irb->scsw.actl & SCSW_ACTL_SCHACT)) || 268 (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
271 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) 269 (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
272 cdev_irb->scsw.cpa = irb->scsw.cpa; 270 cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
273 /* Accumulate device status, but not the device busy flag. */ 271 /* Accumulate device status, but not the device busy flag. */
274 cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; 272 cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
275 /* dstat is not always valid. */ 273 /* dstat is not always valid. */
276 if (irb->scsw.stctl & 274 if (irb->scsw.cmd.stctl &
277 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS 275 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
278 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) 276 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
279 cdev_irb->scsw.dstat |= irb->scsw.dstat; 277 cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
280 /* Accumulate subchannel status. */ 278 /* Accumulate subchannel status. */
281 cdev_irb->scsw.cstat |= irb->scsw.cstat; 279 cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
282 /* Copy residual count if it is valid. */ 280 /* Copy residual count if it is valid. */
283 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && 281 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
284 (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) 282 (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
285 cdev_irb->scsw.count = irb->scsw.count; 283 == 0)
284 cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
286 285
287 /* Take care of bits in the extended status word. */ 286 /* Take care of bits in the extended status word. */
288 ccw_device_accumulate_esw(cdev, irb); 287 ccw_device_accumulate_esw(cdev, irb);
@@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
299 * sense facility available/supported when enabling the 298 * sense facility available/supported when enabling the
300 * concurrent sense facility. 299 * concurrent sense facility.
301 */ 300 */
302 if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && 301 if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
303 !(cdev_irb->esw.esw0.erw.cons)) 302 !(cdev_irb->esw.esw0.erw.cons))
304 cdev->private->flags.dosense = 1; 303 cdev->private->flags.dosense = 1;
305} 304}
@@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
317 sch = to_subchannel(cdev->dev.parent); 316 sch = to_subchannel(cdev->dev.parent);
318 317
319 /* A sense is required, can we do it now ? */ 318 /* A sense is required, can we do it now ? */
320 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) 319 if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
321 /* 320 /*
322 * we received a Unit Check but we have no final 321 * we received a Unit Check but we have no final
323 * status yet, therefore we must delay the SENSE 322 * status yet, therefore we must delay the SENSE
@@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
355 * If not, the remaining bits have no meaning and we must ignore them. 354 * If not, the remaining bits have no meaning and we must ignore them.
356 * The esw is not meaningful either... 355 * The esw is not meaningful either...
357 */ 356 */
358 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) 357 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
359 return; 358 return;
360 359
361 /* Check for channel checks and interface control checks. */ 360 /* Check for channel checks and interface control checks. */
362 ccw_device_msg_control_check(cdev, irb); 361 ccw_device_msg_control_check(cdev, irb);
363 362
364 /* Check for path not operational. */ 363 /* Check for path not operational. */
365 if (irb->scsw.pno && irb->scsw.fctl != 0 && 364 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
366 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
367 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
368 ccw_device_path_notoper(cdev); 365 ccw_device_path_notoper(cdev);
369 366
370 if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && 367 if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
371 (irb->scsw.dstat & DEV_STAT_CHN_END)) { 368 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
372 cdev->private->irb.esw.esw0.erw.cons = 1; 369 cdev->private->irb.esw.esw0.erw.cons = 1;
373 cdev->private->flags.dosense = 0; 370 cdev->private->flags.dosense = 0;
374 } 371 }
@@ -386,11 +383,11 @@ int
386ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) 383ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
387{ 384{
388 ccw_device_accumulate_irb(cdev, irb); 385 ccw_device_accumulate_irb(cdev, irb);
389 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) 386 if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
390 return -EBUSY; 387 return -EBUSY;
391 /* Check for basic sense. */ 388 /* Check for basic sense. */
392 if (cdev->private->flags.dosense && 389 if (cdev->private->flags.dosense &&
393 !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { 390 !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
394 cdev->private->irb.esw.esw0.erw.cons = 1; 391 cdev->private->irb.esw.esw0.erw.cons = 1;
395 cdev->private->flags.dosense = 0; 392 cdev->private->flags.dosense = 0;
396 return 0; 393 return 0;
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
new file mode 100644
index 000000000000..61677dfbdc9b
--- /dev/null
+++ b/drivers/s390/cio/fcx.c
@@ -0,0 +1,350 @@
1/*
2 * Functions for assembling fcx enabled I/O control blocks.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <asm/fcx.h>
15#include "cio.h"
16
17/**
18 * tcw_get_intrg - return pointer to associated interrogate tcw
19 * @tcw: pointer to the original tcw
20 *
21 * Return a pointer to the interrogate tcw associated with the specified tcw
22 * or %NULL if there is no associated interrogate tcw.
23 */
24struct tcw *tcw_get_intrg(struct tcw *tcw)
25{
26 return (struct tcw *) ((addr_t) tcw->intrg);
27}
28EXPORT_SYMBOL(tcw_get_intrg);
29
30/**
31 * tcw_get_data - return pointer to input/output data associated with tcw
32 * @tcw: pointer to the tcw
33 *
34 * Return the input or output data address specified in the tcw depending
35 * on whether the r-bit or the w-bit is set. If neither bit is set, return
36 * %NULL.
37 */
38void *tcw_get_data(struct tcw *tcw)
39{
40 if (tcw->r)
41 return (void *) ((addr_t) tcw->input);
42 if (tcw->w)
43 return (void *) ((addr_t) tcw->output);
44 return NULL;
45}
46EXPORT_SYMBOL(tcw_get_data);
47
48/**
49 * tcw_get_tccb - return pointer to tccb associated with tcw
50 * @tcw: pointer to the tcw
51 *
52 * Return pointer to the tccb associated with this tcw.
53 */
54struct tccb *tcw_get_tccb(struct tcw *tcw)
55{
56 return (struct tccb *) ((addr_t) tcw->tccb);
57}
58EXPORT_SYMBOL(tcw_get_tccb);
59
60/**
61 * tcw_get_tsb - return pointer to tsb associated with tcw
62 * @tcw: pointer to the tcw
63 *
64 * Return pointer to the tsb associated with this tcw.
65 */
66struct tsb *tcw_get_tsb(struct tcw *tcw)
67{
68 return (struct tsb *) ((addr_t) tcw->tsb);
69}
70EXPORT_SYMBOL(tcw_get_tsb);
71
72/**
73 * tcw_init - initialize tcw data structure
74 * @tcw: pointer to the tcw to be initialized
75 * @r: initial value of the r-bit
76 * @w: initial value of the w-bit
77 *
78 * Initialize all fields of the specified tcw data structure with zero and
79 * fill in the format, flags, r and w fields.
80 */
81void tcw_init(struct tcw *tcw, int r, int w)
82{
83 memset(tcw, 0, sizeof(struct tcw));
84 tcw->format = TCW_FORMAT_DEFAULT;
85 tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
86 if (r)
87 tcw->r = 1;
88 if (w)
89 tcw->w = 1;
90}
91EXPORT_SYMBOL(tcw_init);
92
93static inline size_t tca_size(struct tccb *tccb)
94{
95 return tccb->tcah.tcal - 12;
96}
97
98static u32 calc_dcw_count(struct tccb *tccb)
99{
100 int offset;
101 struct dcw *dcw;
102 u32 count = 0;
103 size_t size;
104
105 size = tca_size(tccb);
106 for (offset = 0; offset < size;) {
107 dcw = (struct dcw *) &tccb->tca[offset];
108 count += dcw->count;
109 if (!(dcw->flags & DCW_FLAGS_CC))
110 break;
111 offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
112 }
113 return count;
114}
115
116static u32 calc_cbc_size(struct tidaw *tidaw, int num)
117{
118 int i;
119 u32 cbc_data;
120 u32 cbc_count = 0;
121 u64 data_count = 0;
122
123 for (i = 0; i < num; i++) {
124 if (tidaw[i].flags & TIDAW_FLAGS_LAST)
125 break;
126 /* TODO: find out if padding applies to total of data
127 * transferred or data transferred by this tidaw. Assumption:
128 * applies to total. */
129 data_count += tidaw[i].count;
130 if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
131 cbc_data = 4 + ALIGN(data_count, 4) - data_count;
132 cbc_count += cbc_data;
133 data_count += cbc_data;
134 }
135 }
136 return cbc_count;
137}
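
To make the checksum arithmetic above concrete: with data_count = 10 accumulated and TIDAW_FLAGS_INSERT_CBC set, cbc_data = 4 + ALIGN(10, 4) - 10 = 4 + 12 - 10 = 6, i.e. two pad bytes up to the next word boundary plus four bytes of checksum, and data_count continues at 16. (Whether padding is really computed over the running total or per tidaw is exactly the open question in the TODO above.)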
138
139/**
140 * tcw_finalize - finalize tcw length fields and tidaw list
141 * @tcw: pointer to the tcw
142 * @num_tidaws: the number of tidaws used to address input/output data or zero
143 * if no tida is used
144 *
145 * Calculate the input-/output-count and tccbl field in the tcw, add a
146 * tcat to the tccb and terminate the data tidaw list if used.
147 *
148 * Note: if an input- or output-tida is used, the tidaw list must be stored
149 * in contiguous storage (no ttic). The tcal field in the tccb must be
150 * up-to-date.
151 */
152void tcw_finalize(struct tcw *tcw, int num_tidaws)
153{
154 struct tidaw *tidaw;
155 struct tccb *tccb;
156 struct tccb_tcat *tcat;
157 u32 count;
158
159 /* Terminate tidaw list. */
160 tidaw = tcw_get_data(tcw);
161 if (num_tidaws > 0)
162 tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
163 /* Add tcat to tccb. */
164 tccb = tcw_get_tccb(tcw);
165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
166 memset(tcat, 0, sizeof(*tcat));
167 /* Calculate tcw input/output count and tcat transport count. */
168 count = calc_dcw_count(tccb);
169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
170 count += calc_cbc_size(tidaw, num_tidaws);
171 if (tcw->r)
172 tcw->input_count = count;
173 else if (tcw->w)
174 tcw->output_count = count;
175 tcat->count = ALIGN(count, 4) + 4;
176 /* Calculate tccbl. */
177 tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
178 sizeof(struct tccb_tcat) - 20) >> 2;
179}
180EXPORT_SYMBOL(tcw_finalize);
181
182/**
183 * tcw_set_intrg - set the interrogate tcw address of a tcw
184 * @tcw: the tcw address
185 * @intrg_tcw: the address of the interrogate tcw
186 *
187 * Set the address of the interrogate tcw in the specified tcw.
188 */
189void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
190{
191 tcw->intrg = (u32) ((addr_t) intrg_tcw);
192}
193EXPORT_SYMBOL(tcw_set_intrg);
194
195/**
196 * tcw_set_data - set data address and tida flag of a tcw
197 * @tcw: the tcw address
198 * @data: the data address
199 * @use_tidal: zero if the data address specifies a contiguous block of data,
200 * non-zero if it specifies a list of tidaws.
201 *
202 * Set the input/output data address of a tcw (depending on the value of the
203 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
204 * is set as well.
205 */
206void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
207{
208 if (tcw->r) {
209 tcw->input = (u64) ((addr_t) data);
210 if (use_tidal)
211 tcw->flags |= TCW_FLAGS_INPUT_TIDA;
212 } else if (tcw->w) {
213 tcw->output = (u64) ((addr_t) data);
214 if (use_tidal)
215 tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
216 }
217}
218EXPORT_SYMBOL(tcw_set_data);
219
220/**
221 * tcw_set_tccb - set tccb address of a tcw
222 * @tcw: the tcw address
223 * @tccb: the tccb address
224 *
225 * Set the address of the tccb in the specified tcw.
226 */
227void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
228{
229 tcw->tccb = (u64) ((addr_t) tccb);
230}
231EXPORT_SYMBOL(tcw_set_tccb);
232
233/**
234 * tcw_set_tsb - set tsb address of a tcw
235 * @tcw: the tcw address
236 * @tsb: the tsb address
237 *
238 * Set the address of the tsb in the specified tcw.
239 */
240void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
241{
242 tcw->tsb = (u64) ((addr_t) tsb);
243}
244EXPORT_SYMBOL(tcw_set_tsb);
245
246/**
247 * tccb_init - initialize tccb
248 * @tccb: the tccb address
249 * @size: the maximum size of the tccb
250 * @sac: the service-action-code to be used
251 *
252 * Initialize the header of the specified tccb by resetting all values to zero
253 * and filling in defaults for format, sac and initial tcal fields.
254 */
255void tccb_init(struct tccb *tccb, size_t size, u32 sac)
256{
257 memset(tccb, 0, size);
258 tccb->tcah.format = TCCB_FORMAT_DEFAULT;
259 tccb->tcah.sac = sac;
260 tccb->tcah.tcal = 12;
261}
262EXPORT_SYMBOL(tccb_init);
263
264/**
265 * tsb_init - initialize tsb
266 * @tsb: the tsb address
267 *
268 * Initialize the specified tsb by resetting all values to zero.
269 */
270void tsb_init(struct tsb *tsb)
271{
272	memset(tsb, 0, sizeof(*tsb));
273}
274EXPORT_SYMBOL(tsb_init);
275
276/**
277 * tccb_add_dcw - add a dcw to the tccb
278 * @tccb: the tccb address
279 * @tccb_size: the maximum tccb size
280 * @cmd: the dcw command
281 * @flags: flags for the dcw
282 * @cd: pointer to control data for this dcw or NULL if none is required
283 * @cd_count: number of control data bytes for this dcw
284 * @count: number of data bytes for this dcw
285 *
286 * Add a new dcw to the specified tccb by writing the dcw information specified
287 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
288 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
289 * would exceed the available space as defined by @tccb_size.
290 *
291 * Note: the tcal field of the tccb header will be updated to reflect added
292 * content.
293 */
294struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
295 void *cd, u8 cd_count, u32 count)
296{
297 struct dcw *dcw;
298 int size;
299 int tca_offset;
300
301 /* Check for space. */
302 tca_offset = tca_size(tccb);
303 size = ALIGN(sizeof(struct dcw) + cd_count, 4);
304 if (sizeof(struct tccb_tcah) + tca_offset + size +
305 sizeof(struct tccb_tcat) > tccb_size)
306 return ERR_PTR(-ENOSPC);
307 /* Add dcw to tca. */
308 dcw = (struct dcw *) &tccb->tca[tca_offset];
309 memset(dcw, 0, size);
310 dcw->cmd = cmd;
311 dcw->flags = flags;
312 dcw->count = count;
313 dcw->cd_count = cd_count;
314 if (cd)
315 memcpy(&dcw->cd[0], cd, cd_count);
316 tccb->tcah.tcal += size;
317 return dcw;
318}
319EXPORT_SYMBOL(tccb_add_dcw);
320
321/**
322 * tcw_add_tidaw - add a tidaw to a tcw
323 * @tcw: the tcw address
324 * @num_tidaws: the current number of tidaws
325 * @flags: flags for the new tidaw
326 * @addr: address value for the new tidaw
327 * @count: count value for the new tidaw
328 *
329 * Add a new tidaw to the input/output data tidaw-list of the specified tcw
330 * (depending on the value of the r-flag and w-flag) and return a pointer to
331 * the new tidaw.
332 *
333 * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
334 * must ensure that there is enough space for the new tidaw. The last-tidaw
335 * flag for the last tidaw in the list will be set by tcw_finalize.
336 */
337struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
338 void *addr, u32 count)
339{
340 struct tidaw *tidaw;
341
342 /* Add tidaw to tidaw-list. */
343 tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
344 memset(tidaw, 0, sizeof(struct tidaw));
345 tidaw->flags = flags;
346 tidaw->count = count;
347 tidaw->addr = (u64) ((addr_t) addr);
348 return tidaw;
349}
350EXPORT_SYMBOL(tcw_add_tidaw);
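
/*
 * Example: manual construction of a transport-mode write using the helpers
 * above. This is a minimal sketch rather than driver code: tcw (64-byte
 * aligned), tccb and tsb (8-byte aligned) and tidaws (16-byte aligned, not
 * crossing a 4k boundary) are assumed to be suitable buffers below 2G,
 * buf1/buf2 and the dcw command 0x01 are placeholders, and error handling
 * is omitted. r=0/w=1 in tcw_init selects a write, so tcw_set_data sets the
 * output address and the output-tida flag:
 *
 *	tcw_init(tcw, 0, 1);
 *	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
 *	tcw_set_tccb(tcw, tccb);
 *	tcw_set_tsb(tcw, tsb);
 *	tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x01, 0, NULL, 0, 8192);
 *	tcw_set_data(tcw, tidaws, 1);
 *	tcw_add_tidaw(tcw, 0, 0, buf1, 4096);
 *	tcw_add_tidaw(tcw, 1, 0, buf2, 4096);
 *	tcw_finalize(tcw, 2);
 */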
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 144466ab8c15..528065cb5021 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -8,7 +8,7 @@
8#ifndef S390_IDSET_H 8#ifndef S390_IDSET_H
9#define S390_IDSET_H S390_IDSET_H 9#define S390_IDSET_H S390_IDSET_H
10 10
11#include "schid.h" 11#include <asm/schid.h>
12 12
13struct idset; 13struct idset;
14 14
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 8c613160bfce..3f8f1cf69c76 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,12 +1,12 @@
1#ifndef S390_IO_SCH_H 1#ifndef S390_IO_SCH_H
2#define S390_IO_SCH_H 2#define S390_IO_SCH_H
3 3
4#include "schid.h" 4#include <asm/schid.h>
5 5
6/* 6/*
7 * operation request block 7 * command-mode operation request block
8 */ 8 */
9struct orb { 9struct cmd_orb {
10 u32 intparm; /* interruption parameter */ 10 u32 intparm; /* interruption parameter */
11 u32 key : 4; /* flags, like key, suspend control, etc. */ 11 u32 key : 4; /* flags, like key, suspend control, etc. */
12 u32 spnd : 1; /* suspend control */ 12 u32 spnd : 1; /* suspend control */
@@ -28,8 +28,36 @@ struct orb {
28 u32 cpa; /* channel program address */ 28 u32 cpa; /* channel program address */
29} __attribute__ ((packed, aligned(4))); 29} __attribute__ ((packed, aligned(4)));
30 30
31/*
32 * transport-mode operation request block
33 */
34struct tm_orb {
35 u32 intparm;
36 u32 key:4;
37 u32 :9;
38 u32 b:1;
39 u32 :2;
40 u32 lpm:8;
41 u32 :7;
42 u32 x:1;
43 u32 tcw;
44 u32 prio:8;
45 u32 :8;
46 u32 rsvpgm:8;
47 u32 :8;
48 u32 :32;
49 u32 :32;
50 u32 :32;
51 u32 :32;
52} __attribute__ ((packed, aligned(4)));
53
54union orb {
55 struct cmd_orb cmd;
56 struct tm_orb tm;
57} __attribute__ ((packed, aligned(4)));
58
31struct io_subchannel_private { 59struct io_subchannel_private {
32 struct orb orb; /* operation request block */ 60 union orb orb; /* operation request block */
33 struct ccw1 sense_ccw; /* static ccw for sense command */ 61 struct ccw1 sense_ccw; /* static ccw for sense command */
34} __attribute__ ((aligned(8))); 62} __attribute__ ((aligned(8)));
35 63
@@ -95,16 +123,18 @@ struct ccw_device_private {
95 void *cmb_wait; /* deferred cmb enable/disable */ 123 void *cmb_wait; /* deferred cmb enable/disable */
96}; 124};
97 125
98static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) 126static inline int ssch(struct subchannel_id schid, volatile union orb *addr)
99{ 127{
100 register struct subchannel_id reg1 asm("1") = schid; 128 register struct subchannel_id reg1 asm("1") = schid;
101 int ccode; 129 int ccode = -EIO;
102 130
103 asm volatile( 131 asm volatile(
104 " ssch 0(%2)\n" 132 " ssch 0(%2)\n"
105 " ipm %0\n" 133 "0: ipm %0\n"
106 " srl %0,28" 134 " srl %0,28\n"
107 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 135 "1:\n"
136 EX_TABLE(0b, 1b)
137 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
108 return ccode; 138 return ccode;
109} 139}
110 140
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 652ea3625f9d..9fa2ac13ac85 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -2,7 +2,7 @@
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include <asm/chpid.h> 4#include <asm/chpid.h>
5#include "schid.h" 5#include <asm/schid.h>
6 6
7/* 7/*
8 * TPI info structure 8 * TPI info structure
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c
new file mode 100644
index 000000000000..c592087be0f1
--- /dev/null
+++ b/drivers/s390/cio/isc.c
@@ -0,0 +1,68 @@
1/*
2 * Functions for registration of I/O interruption subclasses on s390.
3 *
4 * Copyright IBM Corp. 2008
5 * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/spinlock.h>
9#include <linux/module.h>
10#include <asm/isc.h>
11
12static unsigned int isc_refs[MAX_ISC + 1];
13static DEFINE_SPINLOCK(isc_ref_lock);
14
15
16/**
17 * isc_register - register an I/O interruption subclass.
18 * @isc: I/O interruption subclass to register
19 *
20 * The number of users for @isc is increased. If this is the first user to
21 * register @isc, the corresponding I/O interruption subclass mask is enabled.
22 *
23 * Context:
24 * This function must not be called in interrupt context.
25 */
26void isc_register(unsigned int isc)
27{
28 if (isc > MAX_ISC) {
29 WARN_ON(1);
30 return;
31 }
32
33 spin_lock(&isc_ref_lock);
34 if (isc_refs[isc] == 0)
35 ctl_set_bit(6, 31 - isc);
36 isc_refs[isc]++;
37 spin_unlock(&isc_ref_lock);
38}
39EXPORT_SYMBOL_GPL(isc_register);
40
41/**
42 * isc_unregister - unregister an I/O interruption subclass.
43 * @isc: I/O interruption subclass to unregister
44 *
45 * The number of users for @isc is decreased. If this is the last user to
46 * unregister @isc, the corresponding I/O interruption subclass mask is
47 * disabled.
48 * Note: This function must not be called if isc_register() hasn't been called
49 * before by the driver for @isc.
50 *
51 * Context:
52 * This function must not be called in interrupt context.
53 */
54void isc_unregister(unsigned int isc)
55{
56 spin_lock(&isc_ref_lock);
57 /* check for misuse */
58 if (isc > MAX_ISC || isc_refs[isc] == 0) {
59 WARN_ON(1);
60 goto out_unlock;
61 }
62 if (isc_refs[isc] == 1)
63 ctl_clear_bit(6, 31 - isc);
64 isc_refs[isc]--;
65out_unlock:
66 spin_unlock(&isc_ref_lock);
67}
68EXPORT_SYMBOL_GPL(isc_unregister);
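
/*
 * Usage sketch (not taken from an in-tree driver): a driver that receives
 * I/O interruptions on a hypothetical subclass MY_ISC, where MY_ISC is any
 * value not exceeding MAX_ISC, brackets its use of the subclass with
 * isc_register() and isc_unregister():
 *
 *	#define MY_ISC 3
 *
 *	static int __init my_driver_init(void)
 *	{
 *		isc_register(MY_ISC);
 *		return 0;
 *	}
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		isc_unregister(MY_ISC);
 *	}
 */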
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
new file mode 100644
index 000000000000..17da9ab932ed
--- /dev/null
+++ b/drivers/s390/cio/itcw.c
@@ -0,0 +1,327 @@
1/*
2 * Functions for incremental construction of fcx enabled I/O control blocks.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <asm/fcx.h>
15#include <asm/itcw.h>
16
17/**
18 * struct itcw - incremental tcw helper data type
19 *
20 * This structure serves as a handle for the incremental construction of a
21 * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
22 * tcw and associated data. The data structures are contained inside a single
23 * contiguous buffer provided by the user.
24 *
25 * The itcw construction functions take care of overall data integrity:
26 * - reset unused fields to zero
27 * - fill in required pointers
28 * - ensure required alignment for data structures
29 * - prevent data structures from crossing a 4k-byte boundary where required
30 * - calculate tccb-related length fields
31 * - optionally provide ready-made interrogate tcw and associated structures
32 *
33 * Restrictions apply to the itcws created with these construction functions:
34 * - tida only supported for data address, not for tccb
35 * - only contiguous tidaw-lists (no ttic)
36 * - total number of bytes required per itcw may not exceed 4k bytes
37 * - either read or write operation (may not work with r=0 and w=0)
38 *
39 * Example:
40 * struct itcw *itcw;
41 * void *buffer;
42 * size_t size;
43 *
44 * size = itcw_calc_size(1, 2, 0);
45 * buffer = kmalloc(size, GFP_DMA);
46 * if (!buffer)
47 * return -ENOMEM;
48 * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
49 * if (IS_ERR(itcw))
50 * return PTR_ERR(itcw);
51 * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
52 * itcw_add_tidaw(itcw, 0, 0x30000, 20);
53 * itcw_add_tidaw(itcw, 0, 0x40000, 52);
54 * itcw_finalize(itcw);
55 *
56 */
57struct itcw {
58 struct tcw *tcw;
59 struct tcw *intrg_tcw;
60 int num_tidaws;
61 int max_tidaws;
62 int intrg_num_tidaws;
63 int intrg_max_tidaws;
64};
65
66/**
67 * itcw_get_tcw - return pointer to tcw associated with the itcw
68 * @itcw: address of the itcw
69 *
70 * Return pointer to the tcw associated with the itcw.
71 */
72struct tcw *itcw_get_tcw(struct itcw *itcw)
73{
74 return itcw->tcw;
75}
76EXPORT_SYMBOL(itcw_get_tcw);
77
78/**
79 * itcw_calc_size - return the size of an itcw with the given parameters
80 * @intrg: if non-zero, add an interrogate tcw
81 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
82 * if no tida is to be used.
83 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
84 * by the interrogate tcw, if specified
85 *
86 * Calculate and return the number of bytes required to hold an itcw with the
87 * given parameters and assuming tccbs with maximum size.
88 *
89 * Note that the resulting size also contains bytes needed for alignment
90 * padding as well as padding to ensure that data structures don't cross a
91 * 4k-boundary where required.
92 */
93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
94{
95 size_t len;
96
97 /* Main data. */
98 len = sizeof(struct itcw);
99 len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
100 /* TSB */ sizeof(struct tsb) +
101 /* TIDAL */ max_tidaws * sizeof(struct tidaw);
102 /* Interrogate data. */
103 if (intrg) {
104 len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
105 /* TSB */ sizeof(struct tsb) +
106 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
107 }
108 /* Maximum required alignment padding. */
109 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
110 /* Maximum padding for structures that may not cross 4k boundary. */
111 if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
112 len += max(max_tidaws, intrg_max_tidaws) *
113 sizeof(struct tidaw) - 1;
114 return len;
115}
116EXPORT_SYMBOL(itcw_calc_size);
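
/*
 * For example, a call itcw_calc_size(0, 2, 0), i.e. no interrogate tcw and
 * two data tidaws, accounts for sizeof(struct itcw) + sizeof(struct tcw) +
 * TCCB_MAX_SIZE + sizeof(struct tsb) + 2 * sizeof(struct tidaw) bytes of
 * payload, 63 + 7 bytes of worst-case alignment padding, and
 * 2 * sizeof(struct tidaw) - 1 bytes of potential padding to keep the
 * tidaw-list from crossing a 4k boundary.
 */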
117
118#define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095))
119
120static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
121 int align, int check_4k)
122{
123 addr_t addr;
124
125 addr = ALIGN(*start, align);
126 if (check_4k && CROSS4K(addr, len)) {
127 addr = ALIGN(addr, 4096);
128 addr = ALIGN(addr, align);
129 }
130 if (addr + len > end)
131 return ERR_PTR(-ENOSPC);
132 *start = addr + len;
133 return (void *) addr;
134}
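
/*
 * For example (illustrative addresses): with *start = 0x1008, len = 32 and
 * align = 64, fit_chunk() places the chunk at 0x1040 and advances *start to
 * 0x1060. With check_4k set, *start = 0x1ff0, len = 0x20 and align = 16,
 * the chunk would straddle a 4k boundary and is therefore moved up to the
 * next one, 0x2000.
 */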
135
136/**
137 * itcw_init - initialize incremental tcw data structure
138 * @buffer: address of buffer to use for data structures
139 * @size: number of bytes in buffer
140 * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
141 * operation tcw
142 * @intrg: if non-zero, add and initialize an interrogate tcw
143 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
144 * if no tida is to be used.
145 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
146 * by the interrogate tcw, if specified
147 *
148 * Prepare the specified buffer to be used as an incremental tcw, i.e. a
149 * helper data structure that can be used to construct a valid tcw by
150 * successive calls to other helper functions. Note: the buffer needs to be
151 * located below the 2G address limit. The resulting tcw has the following
152 * restrictions:
153 * - no tccb tidal
154 * - input/output tidal is contiguous (no ttic)
155 * - total data should not exceed 4k
156 * - tcw specifies either read or write operation
157 *
158 * On success, return pointer to the resulting incremental tcw data structure,
159 * ERR_PTR otherwise.
160 */
161struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
162 int max_tidaws, int intrg_max_tidaws)
163{
164 struct itcw *itcw;
165 void *chunk;
166 addr_t start;
167 addr_t end;
168
169 /* Check for 2G limit. */
170 start = (addr_t) buffer;
171 end = start + size;
172 if (end > (1 << 31))
173 return ERR_PTR(-EINVAL);
174 memset(buffer, 0, size);
175 /* ITCW. */
176 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
177 if (IS_ERR(chunk))
178 return chunk;
179 itcw = chunk;
180 itcw->max_tidaws = max_tidaws;
181 itcw->intrg_max_tidaws = intrg_max_tidaws;
182 /* Main TCW. */
183 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
184 if (IS_ERR(chunk))
185 return chunk;
186 itcw->tcw = chunk;
187 tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
188 (op == ITCW_OP_WRITE) ? 1 : 0);
189 /* Interrogate TCW. */
190 if (intrg) {
191 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
192 if (IS_ERR(chunk))
193 return chunk;
194 itcw->intrg_tcw = chunk;
195 tcw_init(itcw->intrg_tcw, 1, 0);
196 tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
197 }
198 /* Data TIDAL. */
199 if (max_tidaws > 0) {
200 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
201 max_tidaws, 16, 1);
202 if (IS_ERR(chunk))
203 return chunk;
204 tcw_set_data(itcw->tcw, chunk, 1);
205 }
206 /* Interrogate data TIDAL. */
207 if (intrg && (intrg_max_tidaws > 0)) {
208 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
209 intrg_max_tidaws, 16, 1);
210 if (IS_ERR(chunk))
211 return chunk;
212 tcw_set_data(itcw->intrg_tcw, chunk, 1);
213 }
214 /* TSB. */
215 chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
216 if (IS_ERR(chunk))
217 return chunk;
218 tsb_init(chunk);
219 tcw_set_tsb(itcw->tcw, chunk);
220 /* Interrogate TSB. */
221 if (intrg) {
222 chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
223 if (IS_ERR(chunk))
224 return chunk;
225 tsb_init(chunk);
226 tcw_set_tsb(itcw->intrg_tcw, chunk);
227 }
228 /* TCCB. */
229 chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
230 if (IS_ERR(chunk))
231 return chunk;
232 tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
233 tcw_set_tccb(itcw->tcw, chunk);
234 /* Interrogate TCCB. */
235 if (intrg) {
236 chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
237 if (IS_ERR(chunk))
238 return chunk;
239 tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
240 tcw_set_tccb(itcw->intrg_tcw, chunk);
241 tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
242 sizeof(struct dcw_intrg_data), 0);
243 tcw_finalize(itcw->intrg_tcw, 0);
244 }
245 return itcw;
246}
247EXPORT_SYMBOL(itcw_init);
248
249/**
250 * itcw_add_dcw - add a dcw to the itcw
251 * @itcw: address of the itcw
252 * @cmd: the dcw command
253 * @flags: flags for the dcw
254 * @cd: address of control data for this dcw or NULL if none is required
255 * @cd_count: number of control data bytes for this dcw
256 * @count: number of data bytes for this dcw
257 *
258 * Add a new dcw to the specified itcw by writing the dcw information specified
259 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
260 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
261 * would exceed the available space.
262 *
263 * Note: the tcal field of the tccb header will be updated to reflect added
264 * content.
265 */
266struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
267 u8 cd_count, u32 count)
268{
269 return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
270 flags, cd, cd_count, count);
271}
272EXPORT_SYMBOL(itcw_add_dcw);
273
274/**
275 * itcw_add_tidaw - add a tidaw to the itcw
276 * @itcw: address of the itcw
277 * @flags: flags for the new tidaw
278 * @addr: address value for the new tidaw
279 * @count: count value for the new tidaw
280 *
281 * Add a new tidaw to the input/output data tidaw-list of the specified itcw
282 * (depending on the value of the r-flag and w-flag). Return a pointer to
283 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
284 * available space.
285 *
286 * Note: the tidaw-list is assumed to be contiguous with no ttics. The
287 * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
288 */
289struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
290{
291 if (itcw->num_tidaws >= itcw->max_tidaws)
292 return ERR_PTR(-ENOSPC);
293 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
294}
295EXPORT_SYMBOL(itcw_add_tidaw);
296
297/**
298 * itcw_set_data - set data address and tida flag of the itcw
299 * @itcw: address of the itcw
300 * @addr: the data address
301 * @use_tidal: zero if the data address specifies a contiguous block of data,
302 * non-zero if it specifies a list of tidaws.
303 *
304 * Set the input/output data address of the itcw (depending on the value of the
305 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
306 * is set as well.
307 */
308void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
309{
310 tcw_set_data(itcw->tcw, addr, use_tidal);
311}
312EXPORT_SYMBOL(itcw_set_data);
313
314/**
315 * itcw_finalize - calculate length and count fields of the itcw
316 * @itcw: address of the itcw
317 *
318 * Calculate tcw input-/output-count and tccbl fields and add a tcat to the
319 * tccb. In case input- or output-tida is used, the tidaw-list must be stored
320 * in contiguous storage (no ttic). The tcal field in the tccb must be
321 * up-to-date.
322 */
323void itcw_finalize(struct itcw *itcw)
324{
325 tcw_finalize(itcw->tcw, itcw->num_tidaws);
326}
327EXPORT_SYMBOL(itcw_finalize);
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
deleted file mode 100644
index 445cf364e461..000000000000
--- a/drivers/s390/cio/qdio.c
+++ /dev/null
@@ -1,3934 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/cio/qdio.c
4 *
5 * Linux for S/390 QDIO base support, Hipersocket base support
6 * version 2
7 *
8 * Copyright 2000,2002 IBM Corporation
9 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
10 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
11 *
12 * Restriction: only 63 iqdio subchannels have their own indicator;
13 * after that, subsequent subchannels share one indicator
14 *
15 *
16 *
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/delay.h>
36#include <linux/slab.h>
37#include <linux/kernel.h>
38#include <linux/proc_fs.h>
39#include <linux/timer.h>
40#include <linux/mempool.h>
41#include <linux/semaphore.h>
42
43#include <asm/ccwdev.h>
44#include <asm/io.h>
45#include <asm/atomic.h>
46#include <asm/timex.h>
47
48#include <asm/debug.h>
49#include <asm/s390_rdev.h>
50#include <asm/qdio.h>
51#include <asm/airq.h>
52
53#include "cio.h"
54#include "css.h"
55#include "device.h"
56#include "qdio.h"
57#include "ioasm.h"
58#include "chsc.h"
59
60/****************** MODULE PARAMETER VARIABLES ********************/
61MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
62MODULE_DESCRIPTION("QDIO base support version 2, " \
63 "Copyright 2000 IBM Corporation");
64MODULE_LICENSE("GPL");
65
66/******************** HERE WE GO ***********************************/
67
68static const char version[] = "QDIO base support version 2";
69
70static int qdio_performance_stats = 0;
71static int proc_perf_file_registration;
72static struct qdio_perf_stats perf_stats;
73
74static int hydra_thinints;
75static int is_passthrough = 0;
76static int omit_svs;
77
78static int indicator_used[INDICATORS_PER_CACHELINE];
79static __u32 * volatile indicators;
80static __u32 volatile spare_indicator;
81static atomic_t spare_indicator_usecount;
82#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
83static mempool_t *qdio_mempool_scssc;
84static struct kmem_cache *qdio_q_cache;
85
86static debug_info_t *qdio_dbf_setup;
87static debug_info_t *qdio_dbf_sbal;
88static debug_info_t *qdio_dbf_trace;
89static debug_info_t *qdio_dbf_sense;
90#ifdef CONFIG_QDIO_DEBUG
91static debug_info_t *qdio_dbf_slsb_out;
92static debug_info_t *qdio_dbf_slsb_in;
93#endif /* CONFIG_QDIO_DEBUG */
94
95/* iQDIO stuff: */
96static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97 during a while loop */
98static DEFINE_SPINLOCK(ttiq_list_lock);
99static void *tiqdio_ind;
100static void tiqdio_tl(unsigned long);
101static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102
103/* not a macro, as one of the arguments is atomic_read */
104static inline int
105qdio_min(int a,int b)
106{
107 if (a<b)
108 return a;
109 else
110 return b;
111}
112
113/***************** SCRUBBER HELPER ROUTINES **********************/
114#ifdef CONFIG_64BIT
115static inline void qdio_perf_stat_inc(atomic64_t *count)
116{
117 if (qdio_performance_stats)
118 atomic64_inc(count);
119}
120
121static inline void qdio_perf_stat_dec(atomic64_t *count)
122{
123 if (qdio_performance_stats)
124 atomic64_dec(count);
125}
126#else /* CONFIG_64BIT */
127static inline void qdio_perf_stat_inc(atomic_t *count)
128{
129 if (qdio_performance_stats)
130 atomic_inc(count);
131}
132
133static inline void qdio_perf_stat_dec(atomic_t *count)
134{
135 if (qdio_performance_stats)
136 atomic_dec(count);
137}
138#endif /* CONFIG_64BIT */
139
140static inline __u64
141qdio_get_micros(void)
142{
143 return (get_clock() >> 12); /* time>>12 is microseconds */
144}
145
146/*
147 * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
148 * the q in any case, so that we'll not be interrupted when we are in
149 * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
150 * always works (famous last words)
151 */
152static inline int
153qdio_reserve_q(struct qdio_q *q)
154{
155 return atomic_add_return(1,&q->use_count) - 1;
156}
157
158static inline void
159qdio_release_q(struct qdio_q *q)
160{
161 atomic_dec(&q->use_count);
162}
163
164/*check ccq */
165static int
166qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
167{
168 char dbf_text[15];
169
170 if (ccq == 0 || ccq == 32)
171 return 0;
172 if (ccq == 96 || ccq == 97)
173 return 1;
174 /*notify devices immediately*/
175 sprintf(dbf_text,"%d", ccq);
176 QDIO_DBF_TEXT2(1,trace,dbf_text);
177 return -EIO;
178}
179/* EQBS: extract buffer states */
180static int
181qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
182 unsigned int *start, unsigned int *cnt)
183{
184 struct qdio_irq *irq;
185 unsigned int tmp_cnt, q_no, ccq;
186 int rc ;
187 char dbf_text[15];
188
189 ccq = 0;
190 tmp_cnt = *cnt;
191 irq = (struct qdio_irq*)q->irq_ptr;
192 q_no = q->q_no;
193 if(!q->is_input_q)
194 q_no += irq->no_input_qs;
195again:
196 ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
197 rc = qdio_check_ccq(q, ccq);
198 if ((ccq == 96) && (tmp_cnt != *cnt))
199 rc = 0;
200 if (rc == 1) {
201 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
202 goto again;
203 }
204 if (rc < 0) {
205 QDIO_DBF_TEXT2(1,trace,"eqberr");
206 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
207 QDIO_DBF_TEXT2(1,trace,dbf_text);
208 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
209 QDIO_STATUS_LOOK_FOR_ERROR,
210 0, 0, 0, -1, -1, q->int_parm);
211 return 0;
212 }
213 return (tmp_cnt - *cnt);
214}
215
216/* SQBS: set buffer states */
217static int
218qdio_do_sqbs(struct qdio_q *q, unsigned char state,
219 unsigned int *start, unsigned int *cnt)
220{
221 struct qdio_irq *irq;
222 unsigned int tmp_cnt, q_no, ccq;
223 int rc;
224 char dbf_text[15];
225
226 ccq = 0;
227 tmp_cnt = *cnt;
228 irq = (struct qdio_irq*)q->irq_ptr;
229 q_no = q->q_no;
230 if(!q->is_input_q)
231 q_no += irq->no_input_qs;
232again:
233 ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
234 rc = qdio_check_ccq(q, ccq);
235 if (rc == 1) {
236 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
237 goto again;
238 }
239 if (rc < 0) {
240 QDIO_DBF_TEXT3(1,trace,"sqberr");
241 sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt);
242 QDIO_DBF_TEXT3(1,trace,dbf_text);
243 sprintf(dbf_text,"%d,%d",ccq,q_no);
244 QDIO_DBF_TEXT3(1,trace,dbf_text);
245 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
246 QDIO_STATUS_LOOK_FOR_ERROR,
247 0, 0, 0, -1, -1, q->int_parm);
248 return 0;
249 }
250 return (tmp_cnt - *cnt);
251}
252
253static inline int
254qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
255 unsigned char state, unsigned int *count)
256{
257 volatile char *slsb;
258 struct qdio_irq *irq;
259
260 irq = (struct qdio_irq*)q->irq_ptr;
261 if (!irq->is_qebsm) {
262 slsb = (char *)&q->slsb.acc.val[(*bufno)];
263 xchg(slsb, state);
264 return 1;
265 }
266 return qdio_do_sqbs(q, state, bufno, count);
267}
268
269#ifdef CONFIG_QDIO_DEBUG
270static inline void
271qdio_trace_slsb(struct qdio_q *q)
272{
273 if (q->queue_type==QDIO_TRACE_QTYPE) {
274 if (q->is_input_q)
275 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
276 QDIO_MAX_BUFFERS_PER_Q);
277 else
278 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
279 QDIO_MAX_BUFFERS_PER_Q);
280 }
281}
282#endif
283
284static inline int
285set_slsb(struct qdio_q *q, unsigned int *bufno,
286 unsigned char state, unsigned int *count)
287{
288 int rc;
289#ifdef CONFIG_QDIO_DEBUG
290 qdio_trace_slsb(q);
291#endif
292 rc = qdio_set_slsb(q, bufno, state, count);
293#ifdef CONFIG_QDIO_DEBUG
294 qdio_trace_slsb(q);
295#endif
296 return rc;
297}
298static inline int
299qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
300 unsigned int gpr3)
301{
302 int cc;
303
304 QDIO_DBF_TEXT4(0,trace,"sigasync");
305 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
306
307 qdio_perf_stat_inc(&perf_stats.siga_syncs);
308
309 cc = do_siga_sync(q->schid, gpr2, gpr3);
310 if (cc)
311 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
312
313 return cc;
314}
315
316static inline int
317qdio_siga_sync_q(struct qdio_q *q)
318{
319 if (q->is_input_q)
320 return qdio_siga_sync(q, 0, q->mask);
321 return qdio_siga_sync(q, q->mask, 0);
322}
323
324static int
325__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
326{
327 struct qdio_irq *irq;
328 unsigned int fc = 0;
329 unsigned long schid;
330
331 irq = (struct qdio_irq *) q->irq_ptr;
332 if (!irq->is_qebsm)
333 schid = *((u32 *)&q->schid);
334 else {
335 schid = irq->sch_token;
336 fc |= 0x80;
337 }
338 return do_siga_output(schid, q->mask, busy_bit, fc);
339}
340
341/*
342 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
343 * an access exception
344 */
345static int
346qdio_siga_output(struct qdio_q *q)
347{
348 int cc;
349 __u32 busy_bit;
350 __u64 start_time=0;
351
352 qdio_perf_stat_inc(&perf_stats.siga_outs);
353
354 QDIO_DBF_TEXT4(0,trace,"sigaout");
355 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
356
357 for (;;) {
358 cc = __do_siga_output(q, &busy_bit);
359//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
360 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
361 if (!start_time)
362 start_time=NOW;
363 if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
364 break;
365 } else
366 break;
367 }
368
369 if ((cc==2) && (busy_bit))
370 cc |= QDIO_SIGA_ERROR_B_BIT_SET;
371
372 if (cc)
373 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
374
375 return cc;
376}
377
378static int
379qdio_siga_input(struct qdio_q *q)
380{
381 int cc;
382
383 QDIO_DBF_TEXT4(0,trace,"sigain");
384 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
385
386 qdio_perf_stat_inc(&perf_stats.siga_ins);
387
388 cc = do_siga_input(q->schid, q->mask);
389
390 if (cc)
391 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
392
393 return cc;
394}
395
396/* locked by the locks in qdio_activate and qdio_cleanup */
397static __u32 *
398qdio_get_indicator(void)
399{
400 int i;
401
402 for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
403 if (!indicator_used[i]) {
404 indicator_used[i]=1;
405 return indicators+i;
406 }
407 atomic_inc(&spare_indicator_usecount);
408 return (__u32 * volatile) &spare_indicator;
409}
410
411/* locked by the locks in qdio_activate and qdio_cleanup */
412static void
413qdio_put_indicator(__u32 *addr)
414{
415 int i;
416
417 if ( (addr) && (addr!=&spare_indicator) ) {
418 i=addr-indicators;
419 indicator_used[i]=0;
420 }
421 if (addr == &spare_indicator)
422 atomic_dec(&spare_indicator_usecount);
423}
424
425static inline void
426tiqdio_clear_summary_bit(__u32 *location)
427{
428 QDIO_DBF_TEXT5(0,trace,"clrsummb");
429 QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
430
431 xchg(location,0);
432}
433
434static inline void
435tiqdio_set_summary_bit(__u32 *location)
436{
437 QDIO_DBF_TEXT5(0,trace,"setsummb");
438 QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
439
440 xchg(location,-1);
441}
442
443static inline void
444tiqdio_sched_tl(void)
445{
446 tasklet_hi_schedule(&tiqdio_tasklet);
447}
448
449static void
450qdio_mark_tiq(struct qdio_q *q)
451{
452 unsigned long flags;
453
454 QDIO_DBF_TEXT4(0,trace,"mark iq");
455 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
456
457 spin_lock_irqsave(&ttiq_list_lock,flags);
458 if (unlikely(atomic_read(&q->is_in_shutdown)))
459 goto out_unlock;
460
461 if (!q->is_input_q)
462 goto out_unlock;
463
464 if ((q->list_prev) || (q->list_next))
465 goto out_unlock;
466
467 if (!tiq_list) {
468 tiq_list=q;
469 q->list_prev=q;
470 q->list_next=q;
471 } else {
472 q->list_next=tiq_list;
473 q->list_prev=tiq_list->list_prev;
474 tiq_list->list_prev->list_next=q;
475 tiq_list->list_prev=q;
476 }
477 spin_unlock_irqrestore(&ttiq_list_lock,flags);
478
479 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
480 tiqdio_sched_tl();
481 return;
482out_unlock:
483 spin_unlock_irqrestore(&ttiq_list_lock,flags);
484 return;
485}
486
487static inline void
488qdio_mark_q(struct qdio_q *q)
489{
490 QDIO_DBF_TEXT4(0,trace,"mark q");
491 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
492
493 if (unlikely(atomic_read(&q->is_in_shutdown)))
494 return;
495
496 tasklet_schedule(&q->tasklet);
497}
498
499static int
500qdio_stop_polling(struct qdio_q *q)
501{
502#ifdef QDIO_USE_PROCESSING_STATE
503 unsigned int tmp, gsf, count = 1;
504 unsigned char state = 0;
505 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
506
507 if (!atomic_xchg(&q->polling,0))
508 return 1;
509
510 QDIO_DBF_TEXT4(0,trace,"stoppoll");
511 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
512
513 /* show the card that we are not polling anymore */
514 if (!q->is_input_q)
515 return 1;
516
517 tmp = gsf = GET_SAVED_FRONTIER(q);
518 tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
519 set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
520
521 /*
522 * we don't issue this SYNC_MEMORY, as we trust Rick T and
523 * moreover will not use the PROCESSING state under VM, so
524 * q->polling was 0 anyway
525 */
526 /*SYNC_MEMORY;*/
527 if (irq->is_qebsm) {
528 count = 1;
529 qdio_do_eqbs(q, &state, &gsf, &count);
530 } else
531 state = q->slsb.acc.val[gsf];
532 if (state != SLSB_P_INPUT_PRIMED)
533 return 1;
534 /*
535 * set our summary bit again, as otherwise there is a
536 * small window we can miss between resetting it and
537 * checking for PRIMED state
538 */
539 if (q->is_thinint_q)
540 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
541 return 0;
542
543#else /* QDIO_USE_PROCESSING_STATE */
544 return 1;
545#endif /* QDIO_USE_PROCESSING_STATE */
546}
547
548/*
549 * see the comment in do_QDIO and before qdio_reserve_q about the
550 * sophisticated locking outside of unmark_q, so that we don't need to
551 * disable the interrupts :-)
552*/
553static void
554qdio_unmark_q(struct qdio_q *q)
555{
556 unsigned long flags;
557
558 QDIO_DBF_TEXT4(0,trace,"unmark q");
559 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
560
561 if ((!q->list_prev)||(!q->list_next))
562 return;
563
564 if ((q->is_thinint_q)&&(q->is_input_q)) {
565 /* iQDIO */
566 spin_lock_irqsave(&ttiq_list_lock,flags);
567 /* in case cleanup has done this already and simultaneously
568 * qdio_unmark_q is called from the interrupt handler, we've
569 * got to check this in this specific case again */
570 if ((!q->list_prev)||(!q->list_next))
571 goto out;
572 if (q->list_next==q) {
573 /* q was the only interesting q */
574 tiq_list=NULL;
575 q->list_next=NULL;
576 q->list_prev=NULL;
577 } else {
578 q->list_next->list_prev=q->list_prev;
579 q->list_prev->list_next=q->list_next;
580 tiq_list=q->list_next;
581 q->list_next=NULL;
582 q->list_prev=NULL;
583 }
584out:
585 spin_unlock_irqrestore(&ttiq_list_lock,flags);
586 }
587}
588
589static inline unsigned long
590tiqdio_clear_global_summary(void)
591{
592 unsigned long time;
593
594 QDIO_DBF_TEXT5(0,trace,"clrglobl");
595
596 time = do_clear_global_summary();
597
598 QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
599
600 return time;
601}
602
603
604/************************* OUTBOUND ROUTINES *******************************/
605static int
606qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
607{
608 struct qdio_irq *irq;
609 unsigned char state;
610 unsigned int cnt, count, ftc;
611
612 irq = (struct qdio_irq *) q->irq_ptr;
613 if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
614 SYNC_MEMORY;
615
616 ftc = q->first_to_check;
617 count = qdio_min(atomic_read(&q->number_of_buffers_used),
618 (QDIO_MAX_BUFFERS_PER_Q-1));
619 if (count == 0)
620 return q->first_to_check;
621 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
622 if (cnt == 0)
623 return q->first_to_check;
624 switch (state) {
625 case SLSB_P_OUTPUT_ERROR:
626 QDIO_DBF_TEXT3(0,trace,"outperr");
627 atomic_sub(cnt , &q->number_of_buffers_used);
628 if (q->qdio_error)
629 q->error_status_flags |=
630 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
631 q->qdio_error = SLSB_P_OUTPUT_ERROR;
632 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
633 q->first_to_check = ftc;
634 break;
635 case SLSB_P_OUTPUT_EMPTY:
636 QDIO_DBF_TEXT5(0,trace,"outpempt");
637 atomic_sub(cnt, &q->number_of_buffers_used);
638 q->first_to_check = ftc;
639 break;
640 case SLSB_CU_OUTPUT_PRIMED:
641 /* all buffers primed */
642 QDIO_DBF_TEXT5(0,trace,"outpprim");
643 break;
644 default:
645 break;
646 }
647 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
648 return q->first_to_check;
649}
650
651static int
652qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
653{
654 struct qdio_irq *irq;
655 unsigned char state;
656 int tmp, ftc, count, cnt;
657 char dbf_text[15];
658
659
660 irq = (struct qdio_irq *) q->irq_ptr;
661 ftc = q->first_to_check;
662 count = qdio_min(atomic_read(&q->number_of_buffers_used),
663 (QDIO_MAX_BUFFERS_PER_Q-1));
664 if (count == 0)
665 return q->first_to_check;
666 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
667 if (cnt == 0)
668 return q->first_to_check;
669 switch (state) {
670 case SLSB_P_INPUT_ERROR :
671#ifdef CONFIG_QDIO_DEBUG
672 QDIO_DBF_TEXT3(1,trace,"inperr");
673 sprintf(dbf_text,"%2x,%2x",ftc,count);
674 QDIO_DBF_TEXT3(1,trace,dbf_text);
675#endif /* CONFIG_QDIO_DEBUG */
676 if (q->qdio_error)
677 q->error_status_flags |=
678 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
679 q->qdio_error = SLSB_P_INPUT_ERROR;
680 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
681 atomic_sub(cnt, &q->number_of_buffers_used);
682 q->first_to_check = ftc;
683 break;
684 case SLSB_P_INPUT_PRIMED :
685 QDIO_DBF_TEXT3(0,trace,"inptprim");
686 sprintf(dbf_text,"%2x,%2x",ftc,count);
687 QDIO_DBF_TEXT3(1,trace,dbf_text);
688 tmp = 0;
689 ftc = q->first_to_check;
690#ifdef QDIO_USE_PROCESSING_STATE
691 if (cnt > 1) {
692 cnt -= 1;
693 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
694 if (!tmp)
695 break;
696 }
697 cnt = 1;
698 tmp += set_slsb(q, &ftc,
699 SLSB_P_INPUT_PROCESSING, &cnt);
700 atomic_set(&q->polling, 1);
701#else
702 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
703#endif
704 atomic_sub(tmp, &q->number_of_buffers_used);
705 q->first_to_check = ftc;
706 break;
707 case SLSB_CU_INPUT_EMPTY:
708 case SLSB_P_INPUT_NOT_INIT:
709 case SLSB_P_INPUT_PROCESSING:
710 QDIO_DBF_TEXT5(0,trace,"inpnipro");
711 break;
712 default:
713 break;
714 }
715 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
716 return q->first_to_check;
717}
718
719static int
720qdio_get_outbound_buffer_frontier(struct qdio_q *q)
721{
722 struct qdio_irq *irq;
723 volatile char *slsb;
724 unsigned int count = 1;
725 int first_not_to_check, f, f_mod_no;
726 char dbf_text[15];
727
728 QDIO_DBF_TEXT4(0,trace,"getobfro");
729 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
730
731 irq = (struct qdio_irq *) q->irq_ptr;
732 if (irq->is_qebsm)
733 return qdio_qebsm_get_outbound_buffer_frontier(q);
734
735 slsb=&q->slsb.acc.val[0];
736 f_mod_no=f=q->first_to_check;
737 /*
738 * f points to already processed elements, so f+no_used is correct...
739 * ... but: we don't check 128 buffers, as otherwise
740 * qdio_has_outbound_q_moved would return 0
741 */
742 first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
743 (QDIO_MAX_BUFFERS_PER_Q-1));
744
745 if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) ||
746 (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH))
747 SYNC_MEMORY;
748
749check_next:
750 if (f==first_not_to_check)
751 goto out;
752
753 switch(slsb[f_mod_no]) {
754
755 /* the adapter has not fetched the output yet */
756 case SLSB_CU_OUTPUT_PRIMED:
757 QDIO_DBF_TEXT5(0,trace,"outpprim");
758 break;
759
760 /* the adapter got it */
761 case SLSB_P_OUTPUT_EMPTY:
762 atomic_dec(&q->number_of_buffers_used);
763 f++;
764 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
765 QDIO_DBF_TEXT5(0,trace,"outpempt");
766 goto check_next;
767
768 case SLSB_P_OUTPUT_ERROR:
769 QDIO_DBF_TEXT3(0,trace,"outperr");
770 sprintf(dbf_text,"%x-%x-%x",f_mod_no,
771 q->sbal[f_mod_no]->element[14].sbalf.value,
772 q->sbal[f_mod_no]->element[15].sbalf.value);
773 QDIO_DBF_TEXT3(1,trace,dbf_text);
774 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
775
776 /* kind of process the buffer */
777 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
778
779 /*
780 * we increment the frontier, as this buffer
781 * was processed obviously
782 */
783 atomic_dec(&q->number_of_buffers_used);
784 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
785
786 if (q->qdio_error)
787 q->error_status_flags|=
788 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
789 q->qdio_error=SLSB_P_OUTPUT_ERROR;
790 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
791
792 break;
793
794 /* no new buffers */
795 default:
796 QDIO_DBF_TEXT5(0,trace,"outpni");
797 }
798out:
799 return (q->first_to_check=f_mod_no);
800}
801
802/* all buffers are processed */
803static int
804qdio_is_outbound_q_done(struct qdio_q *q)
805{
806 int no_used;
807#ifdef CONFIG_QDIO_DEBUG
808 char dbf_text[15];
809#endif
810
811 no_used=atomic_read(&q->number_of_buffers_used);
812
813#ifdef CONFIG_QDIO_DEBUG
814 if (no_used) {
815 sprintf(dbf_text,"oqisnt%02x",no_used);
816 QDIO_DBF_TEXT4(0,trace,dbf_text);
817 } else {
818 QDIO_DBF_TEXT4(0,trace,"oqisdone");
819 }
820 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
821#endif /* CONFIG_QDIO_DEBUG */
822 return (no_used==0);
823}
824
825static int
826qdio_has_outbound_q_moved(struct qdio_q *q)
827{
828 int i;
829
830 i=qdio_get_outbound_buffer_frontier(q);
831
832 if ( (i!=GET_SAVED_FRONTIER(q)) ||
833 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
834 SAVE_FRONTIER(q,i);
835 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
836 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
837 return 1;
838 } else {
839 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
840 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
841 return 0;
842 }
843}
844
845static void
846qdio_kick_outbound_q(struct qdio_q *q)
847{
848 int result;
849#ifdef CONFIG_QDIO_DEBUG
850 char dbf_text[15];
851
852 QDIO_DBF_TEXT4(0,trace,"kickoutq");
853 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
854#endif /* CONFIG_QDIO_DEBUG */
855
856 if (!q->siga_out)
857 return;
858
859 /* here's the story with cc=2 and busy bit set (thanks, Rick):
860 * VM's CP could present us cc=2 and busy bit set on SIGA-write
861 * during reconfiguration of their Guest LAN (only in HIPERS mode,
862 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
863 * the queues down immediately; and not being under VM we have a
864 * problem on cc=2 and busy bit set right away).
865 *
866 * Therefore qdio_siga_output will try for a short time constantly,
867 * if such a condition occurs. If it doesn't change, it will
868 * increase the busy_siga_counter and save the timestamp, and
869 * schedule the queue for later processing (via mark_q, using the
870 * queue tasklet). __qdio_outbound_processing will check out the
871 * counter. If non-zero, it will call qdio_kick_outbound_q as often
872 * as the value of the counter. This will attempt further SIGA
873 * instructions. For each successful SIGA, the counter is
874 * decreased, for failing SIGAs the counter remains the same, after
875 * all.
876 * After some time of no movement, qdio_kick_outbound_q will
877 * finally fail and reflect corresponding error codes to call
878 * the upper layer module and have it take the queues down.
879 *
880 * Note that this is a change from the original HiperSockets design
881 * (saying cc=2 and busy bit means take the queues down), but in
882 * these days Guest LAN didn't exist... excessive cc=2 with busy bit
883 * conditions will still take the queues down, but the threshold is
884 * higher due to the Guest LAN environment.
885 */
886
887
888 result=qdio_siga_output(q);
889
890 switch (result) {
891 case 0:
892 /* went smooth this time, reset timestamp */
893#ifdef CONFIG_QDIO_DEBUG
894 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
895 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
896 atomic_read(&q->busy_siga_counter));
897 QDIO_DBF_TEXT3(0,trace,dbf_text);
898#endif /* CONFIG_QDIO_DEBUG */
899 q->timing.busy_start=0;
900 break;
901 case (2|QDIO_SIGA_ERROR_B_BIT_SET):
902 /* cc=2 and busy bit: */
903 atomic_inc(&q->busy_siga_counter);
904
905 /* if the last siga was successful, save
906 * timestamp here */
907 if (!q->timing.busy_start)
908 q->timing.busy_start=NOW;
909
910 /* if we're in time, don't touch error_status_flags
911 * and siga_error */
912 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
913 qdio_mark_q(q);
914 break;
915 }
916 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
917#ifdef CONFIG_QDIO_DEBUG
918 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
919 atomic_read(&q->busy_siga_counter));
920 QDIO_DBF_TEXT3(0,trace,dbf_text);
921#endif /* CONFIG_QDIO_DEBUG */
922 /* else fallthrough and report error */
923 default:
924 /* for plain cc=1, 2 or 3: */
925 if (q->siga_error)
926 q->error_status_flags|=
927 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
928 q->error_status_flags|=
929 QDIO_STATUS_LOOK_FOR_ERROR;
930 q->siga_error=result;
931 }
932}
933
934static void
935qdio_kick_outbound_handler(struct qdio_q *q)
936{
937 int start, end, real_end, count;
938#ifdef CONFIG_QDIO_DEBUG
939 char dbf_text[15];
940#endif
941
942 start = q->first_element_to_kick;
943 /* last_move_ftc was just updated */
944 real_end = GET_SAVED_FRONTIER(q);
945 end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
946 (QDIO_MAX_BUFFERS_PER_Q-1);
947 count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
948 (QDIO_MAX_BUFFERS_PER_Q-1);
949
950#ifdef CONFIG_QDIO_DEBUG
951 QDIO_DBF_TEXT4(0,trace,"kickouth");
952 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
953
954 sprintf(dbf_text,"s=%2xc=%2x",start,count);
955 QDIO_DBF_TEXT4(0,trace,dbf_text);
956#endif /* CONFIG_QDIO_DEBUG */
957
958 if (q->state==QDIO_IRQ_STATE_ACTIVE)
959 q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
960 q->error_status_flags,
961 q->qdio_error,q->siga_error,q->q_no,start,count,
962 q->int_parm);
963
964 /* for the next time: */
965 q->first_element_to_kick=real_end;
966 q->qdio_error=0;
967 q->siga_error=0;
968 q->error_status_flags=0;
969}
970
971static void
972__qdio_outbound_processing(struct qdio_q *q)
973{
974 int siga_attempts;
975
976 QDIO_DBF_TEXT4(0,trace,"qoutproc");
977 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
978
979 if (unlikely(qdio_reserve_q(q))) {
980 qdio_release_q(q);
981 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
982 /* as we're sissies, we'll check next time */
983 if (likely(!atomic_read(&q->is_in_shutdown))) {
984 qdio_mark_q(q);
985 QDIO_DBF_TEXT4(0,trace,"busy,agn");
986 }
987 return;
988 }
989 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
990 qdio_perf_stat_inc(&perf_stats.tl_runs);
991
992 /* see comment in qdio_kick_outbound_q */
993 siga_attempts=atomic_read(&q->busy_siga_counter);
994 while (siga_attempts) {
995 atomic_dec(&q->busy_siga_counter);
996 qdio_kick_outbound_q(q);
997 siga_attempts--;
998 }
999
1000 if (qdio_has_outbound_q_moved(q))
1001 qdio_kick_outbound_handler(q);
1002
1003 if (q->queue_type == QDIO_ZFCP_QFMT) {
1004 if ((!q->hydra_gives_outbound_pcis) &&
1005 (!qdio_is_outbound_q_done(q)))
1006 qdio_mark_q(q);
1007 }
1008 else if (((!q->is_iqdio_q) && (!q->is_pci_out)) ||
1009 (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) {
1010 /*
1011 * make sure buffer switch from PRIMED to EMPTY is noticed
1012 * and outbound_handler is called
1013 */
1014 if (qdio_is_outbound_q_done(q)) {
1015 del_timer(&q->timer);
1016 } else {
1017 if (!timer_pending(&q->timer))
1018 mod_timer(&q->timer, jiffies +
1019 QDIO_FORCE_CHECK_TIMEOUT);
1020 }
1021 }
1022
1023 qdio_release_q(q);
1024}
1025
1026static void
1027qdio_outbound_processing(unsigned long q)
1028{
1029 __qdio_outbound_processing((struct qdio_q *) q);
1030}
1031
1032/************************* INBOUND ROUTINES *******************************/
1033
1034
1035static int
1036qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1037{
1038 struct qdio_irq *irq;
1039 int f,f_mod_no;
1040 volatile char *slsb;
1041 unsigned int count = 1;
1042 int first_not_to_check;
1043#ifdef CONFIG_QDIO_DEBUG
1044 char dbf_text[15];
1045#endif /* CONFIG_QDIO_DEBUG */
1046#ifdef QDIO_USE_PROCESSING_STATE
1047 int last_position=-1;
1048#endif /* QDIO_USE_PROCESSING_STATE */
1049
1050 QDIO_DBF_TEXT4(0,trace,"getibfro");
1051 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1052
1053 irq = (struct qdio_irq *) q->irq_ptr;
1054 if (irq->is_qebsm)
1055 return qdio_qebsm_get_inbound_buffer_frontier(q);
1056
1057 slsb=&q->slsb.acc.val[0];
1058 f_mod_no=f=q->first_to_check;
1059 /*
1060 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
1061 * would return 0
1062 */
1063 first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
1064 (QDIO_MAX_BUFFERS_PER_Q-1));
1065
1066 /*
1067 * we don't use this one, as a PCI, or we after a thin interrupt,
1068 * will sync the queues
1069 */
1070 /* SYNC_MEMORY;*/
1071
1072check_next:
1073 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
1074 if (f==first_not_to_check)
1075 goto out;
1076 switch (slsb[f_mod_no]) {
1077
1078 /* CU_EMPTY means frontier is reached */
1079 case SLSB_CU_INPUT_EMPTY:
1080 QDIO_DBF_TEXT5(0,trace,"inptempt");
1081 break;
1082
1083 /* P_PRIMED means set slsb to P_PROCESSING and move on */
1084 case SLSB_P_INPUT_PRIMED:
1085 QDIO_DBF_TEXT5(0,trace,"inptprim");
1086
1087#ifdef QDIO_USE_PROCESSING_STATE
1088 /*
1089 * as soon as running under VM, polling the input queues will
1090 * kill VM in terms of CP overhead
1091 */
1092 if (q->siga_sync) {
1093 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1094 } else {
1095 /* set the previous buffer to NOT_INIT. The current
1096 * buffer will be set to PROCESSING at the end of
1097 * this function to avoid further interrupts. */
1098 if (last_position>=0)
1099 set_slsb(q, &last_position,
1100 SLSB_P_INPUT_NOT_INIT, &count);
1101 atomic_set(&q->polling,1);
1102 last_position=f_mod_no;
1103 }
1104#else /* QDIO_USE_PROCESSING_STATE */
1105 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1106#endif /* QDIO_USE_PROCESSING_STATE */
1107 /*
1108 * not needed, as the inbound queue will be synced on the next
1109 * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
1110 */
1111 /*SYNC_MEMORY;*/
1112 f++;
1113 atomic_dec(&q->number_of_buffers_used);
1114 goto check_next;
1115
1116 case SLSB_P_INPUT_NOT_INIT:
1117 case SLSB_P_INPUT_PROCESSING:
1118 QDIO_DBF_TEXT5(0,trace,"inpnipro");
1119 break;
1120
1121 /* P_ERROR means frontier is reached, break and report error */
1122 case SLSB_P_INPUT_ERROR:
1123#ifdef CONFIG_QDIO_DEBUG
1124 sprintf(dbf_text,"inperr%2x",f_mod_no);
1125 QDIO_DBF_TEXT3(1,trace,dbf_text);
1126#endif /* CONFIG_QDIO_DEBUG */
1127 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
1128
1129 /* kind of process the buffer */
1130 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1131
1132 if (q->qdio_error)
1133 q->error_status_flags|=
1134 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
1135 q->qdio_error=SLSB_P_INPUT_ERROR;
1136 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
1137
1138 /* we increment the frontier, as this buffer
1139 * was processed obviously */
1140 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1141 atomic_dec(&q->number_of_buffers_used);
1142
1143#ifdef QDIO_USE_PROCESSING_STATE
1144 last_position=-1;
1145#endif /* QDIO_USE_PROCESSING_STATE */
1146
1147 break;
1148
1149 /* everything else means frontier not changed (HALTED or so) */
1150 default:
1151 break;
1152 }
1153out:
1154 q->first_to_check=f_mod_no;
1155
1156#ifdef QDIO_USE_PROCESSING_STATE
1157 if (last_position>=0)
1158 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1159#endif /* QDIO_USE_PROCESSING_STATE */
1160
1161 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
1162
1163 return q->first_to_check;
1164}
1165
1166static int
1167qdio_has_inbound_q_moved(struct qdio_q *q)
1168{
1169 int i;
1170
1171 i=qdio_get_inbound_buffer_frontier(q);
1172 if ( (i!=GET_SAVED_FRONTIER(q)) ||
1173 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
1174 SAVE_FRONTIER(q,i);
1175 if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
1176 SAVE_TIMESTAMP(q);
1177
1178 QDIO_DBF_TEXT4(0,trace,"inhasmvd");
1179 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1180 return 1;
1181 } else {
1182 QDIO_DBF_TEXT4(0,trace,"inhsntmv");
1183 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1184 return 0;
1185 }
1186}
1187
1188/* means, no more buffers to be filled */
1189static int
1190tiqdio_is_inbound_q_done(struct qdio_q *q)
1191{
1192 int no_used;
1193 unsigned int start_buf, count;
1194 unsigned char state = 0;
1195 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1196
1197#ifdef CONFIG_QDIO_DEBUG
1198 char dbf_text[15];
1199#endif
1200
1201 no_used=atomic_read(&q->number_of_buffers_used);
1202
1203 /* propagate the change from 82 to 80 through VM */
1204 SYNC_MEMORY;
1205
1206#ifdef CONFIG_QDIO_DEBUG
1207 if (no_used) {
1208 sprintf(dbf_text,"iqisnt%02x",no_used);
1209 QDIO_DBF_TEXT4(0,trace,dbf_text);
1210 } else {
1211 QDIO_DBF_TEXT4(0,trace,"iniqisdo");
1212 }
1213 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1214#endif /* CONFIG_QDIO_DEBUG */
1215
1216 if (!no_used)
1217 return 1;
1218 if (irq->is_qebsm) {
1219 count = 1;
1220 start_buf = q->first_to_check;
1221 qdio_do_eqbs(q, &state, &start_buf, &count);
1222 } else
1223 state = q->slsb.acc.val[q->first_to_check];
1224 if (state != SLSB_P_INPUT_PRIMED)
1225 /*
1226 * nothing more to do, if the next buffer is not PRIMED.
1227 * note that we did a SYNC_MEMORY before, so that there
1228 * has been a synchronization.
1229 * we will return 0 below, as there is nothing to do
1230 * (stop_polling is not necessary, as we have not been
1231 * using the PROCESSING state)
1232 */
1233 return 0;
1234
1235 /*
1236 * ok, the next input buffer is primed. that means that the device state
1237 * change indicator and adapter local summary are set, so we will find
1238 * it next time.
1239 * we will return 0 below, as there is nothing to do, except scheduling
1240 * ourselves for the next time.
1241 */
1242 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1243 tiqdio_sched_tl();
1244 return 0;
1245}
1246
1247static int
1248qdio_is_inbound_q_done(struct qdio_q *q)
1249{
1250 int no_used;
1251 unsigned int start_buf, count;
1252 unsigned char state = 0;
1253 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1254
1255#ifdef CONFIG_QDIO_DEBUG
1256 char dbf_text[15];
1257#endif
1258
1259 no_used=atomic_read(&q->number_of_buffers_used);
1260
1261 /*
1262 * we need that one for synchronization with the adapter, as it
1263 * does a kind of PCI avoidance
1264 */
1265 SYNC_MEMORY;
1266
1267 if (!no_used) {
1268 QDIO_DBF_TEXT4(0,trace,"inqisdnA");
1269 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1270 return 1;
1271 }
1272 if (irq->is_qebsm) {
1273 count = 1;
1274 start_buf = q->first_to_check;
1275 qdio_do_eqbs(q, &state, &start_buf, &count);
1276 } else
1277 state = q->slsb.acc.val[q->first_to_check];
1278 if (state == SLSB_P_INPUT_PRIMED) {
1279 /* we got something to do */
1280 QDIO_DBF_TEXT4(0,trace,"inqisntA");
1281 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1282 return 0;
1283 }
1284
1285 /* on VM, we don't poll, so the q is always done here */
1286 if (q->siga_sync)
1287 return 1;
1288 if (q->hydra_gives_outbound_pcis)
1289 return 1;
1290
1291 /*
1292 * at this point we know, that inbound first_to_check
1293 * has (probably) not moved (see qdio_inbound_processing)
1294 */
1295 if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
1296#ifdef CONFIG_QDIO_DEBUG
1297 QDIO_DBF_TEXT4(0,trace,"inqisdon");
1298 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1299 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1300 QDIO_DBF_TEXT4(0,trace,dbf_text);
1301#endif /* CONFIG_QDIO_DEBUG */
1302 return 1;
1303 } else {
1304#ifdef CONFIG_QDIO_DEBUG
1305 QDIO_DBF_TEXT4(0,trace,"inqisntd");
1306 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1307 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1308 QDIO_DBF_TEXT4(0,trace,dbf_text);
1309#endif /* CONFIG_QDIO_DEBUG */
1310 return 0;
1311 }
1312}
1313
1314static void
1315qdio_kick_inbound_handler(struct qdio_q *q)
1316{
1317 int count, start, end, real_end, i;
1318#ifdef CONFIG_QDIO_DEBUG
1319 char dbf_text[15];
1320#endif
1321
1322 QDIO_DBF_TEXT4(0,trace,"kickinh");
1323 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1324
1325 start=q->first_element_to_kick;
1326 real_end=q->first_to_check;
1327 end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1328
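	/*
	 * the queue is a power-of-two ring, so all index arithmetic wraps
	 * by masking with QDIO_MAX_BUFFERS_PER_Q-1. "end" is the last
	 * buffer to report to the handler, i.e. first_to_check - 1 modulo
	 * the ring size; the loop below counts the buffers from start to
	 * end inclusive, which is equivalent to
	 * ((end-start)&(QDIO_MAX_BUFFERS_PER_Q-1))+1
	 */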
1329 i=start;
1330 count=0;
1331 while (1) {
1332 count++;
1333 if (i==end)
1334 break;
1335 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1336 }
1337
1338#ifdef CONFIG_QDIO_DEBUG
1339 sprintf(dbf_text,"s=%2xc=%2x",start,count);
1340 QDIO_DBF_TEXT4(0,trace,dbf_text);
1341#endif /* CONFIG_QDIO_DEBUG */
1342
1343 if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
1344 q->handler(q->cdev,
1345 QDIO_STATUS_INBOUND_INT|q->error_status_flags,
1346 q->qdio_error,q->siga_error,q->q_no,start,count,
1347 q->int_parm);
1348
1349 /* for the next time: */
1350 q->first_element_to_kick=real_end;
1351 q->qdio_error=0;
1352 q->siga_error=0;
1353 q->error_status_flags=0;
1354
1355 qdio_perf_stat_inc(&perf_stats.inbound_cnt);
1356}
1357
1358static void
1359__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1360{
1361 struct qdio_irq *irq_ptr;
1362 struct qdio_q *oq;
1363 int i;
1364
1365 QDIO_DBF_TEXT4(0,trace,"iqinproc");
1366 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1367
1368	/*
1369	 * we first want to reserve the q, so that we know that we don't
1370	 * interrupt ourselves and call qdio_unmark_q while is_in_shutdown
1371	 * might be set
1372	 */
1373 if (unlikely(qdio_reserve_q(q))) {
1374 qdio_release_q(q);
1375 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
1376 /*
1377 * as we might just be about to stop polling, we make
1378 * sure that we check again at least once more
1379 */
1380 tiqdio_sched_tl();
1381 return;
1382 }
1383 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
1384 if (unlikely(atomic_read(&q->is_in_shutdown))) {
1385 qdio_unmark_q(q);
1386 goto out;
1387 }
1388
1389 /*
1390 * we reset spare_ind_was_set, when the queue does not use the
1391 * spare indicator
1392 */
1393 if (spare_ind_was_set)
1394 spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
1395
1396 if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
1397 goto out;
1398	/*
1399	 * q->dev_st_chg_ind is the indicator, be it shared or not.
1400	 * only clear it if the indicator is non-shared
1401	 */
1402 if (q->dev_st_chg_ind != &spare_indicator)
1403 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
1404
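	/*
	 * synchronize buffer states with the adapter before scanning: if
	 * the adapter delivers outbound PCIs we may have to sync the
	 * outbound queues too, unless the adapter already syncs them when
	 * presenting thin interrupts (see the siga_sync_done_on_* flags
	 * set up in qdio_initialize_set_siga_flags_input())
	 */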
1405 if (q->hydra_gives_outbound_pcis) {
1406 if (!q->siga_sync_done_on_thinints) {
1407 SYNC_MEMORY_ALL;
1408 } else if (!q->siga_sync_done_on_outb_tis) {
1409 SYNC_MEMORY_ALL_OUTB;
1410 }
1411 } else {
1412 SYNC_MEMORY;
1413 }
1414 /*
1415 * maybe we have to do work on our outbound queues... at least
1416 * we have to check the outbound-int-capable thinint-capable
1417 * queues
1418 */
1419 if (q->hydra_gives_outbound_pcis) {
1420 irq_ptr = (struct qdio_irq*)q->irq_ptr;
1421 for (i=0;i<irq_ptr->no_output_qs;i++) {
1422 oq = irq_ptr->output_qs[i];
1423 if (!qdio_is_outbound_q_done(oq)) {
1424 qdio_perf_stat_dec(&perf_stats.tl_runs);
1425 __qdio_outbound_processing(oq);
1426 }
1427 }
1428 }
1429
1430 if (!qdio_has_inbound_q_moved(q))
1431 goto out;
1432
1433 qdio_kick_inbound_handler(q);
1434 if (tiqdio_is_inbound_q_done(q))
1435 if (!qdio_stop_polling(q)) {
1436 /*
1437 * we set the flags to get into the stuff next time,
1438 * see also comment in qdio_stop_polling
1439 */
1440 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1441 tiqdio_sched_tl();
1442 }
1443out:
1444 qdio_release_q(q);
1445}
1446
1447static void
1448tiqdio_inbound_processing(unsigned long q)
1449{
1450 __tiqdio_inbound_processing((struct qdio_q *) q,
1451 atomic_read(&spare_indicator_usecount));
1452}
1453
1454static void
1455__qdio_inbound_processing(struct qdio_q *q)
1456{
1457 int q_laps=0;
1458
1459 QDIO_DBF_TEXT4(0,trace,"qinproc");
1460 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1461
1462 if (unlikely(qdio_reserve_q(q))) {
1463 qdio_release_q(q);
1464 qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
1465 /* as we're sissies, we'll check next time */
1466 if (likely(!atomic_read(&q->is_in_shutdown))) {
1467 qdio_mark_q(q);
1468 QDIO_DBF_TEXT4(0,trace,"busy,agn");
1469 }
1470 return;
1471 }
1472 qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
1473 qdio_perf_stat_inc(&perf_stats.tl_runs);
1474
1475again:
1476 if (qdio_has_inbound_q_moved(q)) {
1477 qdio_kick_inbound_handler(q);
1478 if (!qdio_stop_polling(q)) {
1479 q_laps++;
1480 if (q_laps<QDIO_Q_LAPS)
1481 goto again;
1482 }
1483 qdio_mark_q(q);
1484 } else {
1485 if (!qdio_is_inbound_q_done(q))
1486 /* means poll time is not yet over */
1487 qdio_mark_q(q);
1488 }
1489
1490 qdio_release_q(q);
1491}
1492
1493static void
1494qdio_inbound_processing(unsigned long q)
1495{
1496 __qdio_inbound_processing((struct qdio_q *) q);
1497}
1498
1499/************************* MAIN ROUTINES *******************************/
1500
1501#ifdef QDIO_USE_PROCESSING_STATE
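/*
 * The return value steers the scan loop in tiqdio_inbound_checks():
 *   0 - stop scanning altogether, the tasklet has been rescheduled
 *   1 - polling could not be stopped within the lap budget; count a
 *       lap and move on to the next queue
 *   2 - nothing to do for this queue, move on to the next one
 *   3 - stopping polling found new work; count a lap and rescan all
 *       queues
 */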
1502static int
1503tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1504{
1505 if (!q) {
1506 tiqdio_sched_tl();
1507 return 0;
1508 }
1509
1510 /*
1511 * under VM, we have not used the PROCESSING state, so no
1512 * need to stop polling
1513 */
1514 if (q->siga_sync)
1515 return 2;
1516
1517 if (unlikely(qdio_reserve_q(q))) {
1518 qdio_release_q(q);
1519 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
1520 /*
1521 * as we might just be about to stop polling, we make
1522 * sure that we check again at least once more
1523 */
1524
1525		/*
1526		 * sanity: we might have got here without the dev st chg
1527		 * ind being set, so set it now
1528		 */
1529 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1530 tiqdio_sched_tl();
1531 return 0;
1532 }
1533 if (qdio_stop_polling(q)) {
1534 qdio_release_q(q);
1535 return 2;
1536 }
1537 if (q_laps<QDIO_Q_LAPS-1) {
1538 qdio_release_q(q);
1539 return 3;
1540 }
1541 /*
1542 * we set the flags to get into the stuff
1543 * next time, see also comment in qdio_stop_polling
1544 */
1545 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1546 tiqdio_sched_tl();
1547 qdio_release_q(q);
1548 return 1;
1549
1550}
1551#endif /* QDIO_USE_PROCESSING_STATE */
1552
1553static void
1554tiqdio_inbound_checks(void)
1555{
1556 struct qdio_q *q;
1557 int spare_ind_was_set=0;
1558#ifdef QDIO_USE_PROCESSING_STATE
1559 int q_laps=0;
1560#endif /* QDIO_USE_PROCESSING_STATE */
1561
1562 QDIO_DBF_TEXT4(0,trace,"iqdinbck");
1563 QDIO_DBF_TEXT5(0,trace,"iqlocsum");
1564
1565#ifdef QDIO_USE_PROCESSING_STATE
1566again:
1567#endif /* QDIO_USE_PROCESSING_STATE */
1568
1569 /* when the spare indicator is used and set, save that and clear it */
1570 if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
1571 spare_ind_was_set = 1;
1572 tiqdio_clear_summary_bit((__u32*)&spare_indicator);
1573 }
1574
1575 q=(struct qdio_q*)tiq_list;
1576 do {
1577 if (!q)
1578 break;
1579 __tiqdio_inbound_processing(q, spare_ind_was_set);
1580 q=(struct qdio_q*)q->list_next;
1581 } while (q!=(struct qdio_q*)tiq_list);
1582
1583#ifdef QDIO_USE_PROCESSING_STATE
1584 q=(struct qdio_q*)tiq_list;
1585 do {
1586 int ret;
1587
1588 ret = tiqdio_reset_processing_state(q, q_laps);
1589 switch (ret) {
1590 case 0:
1591 return;
1592 case 1:
1593 q_laps++;
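			/* fall through */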
1594 case 2:
1595 q = (struct qdio_q*)q->list_next;
1596 break;
1597 default:
1598 q_laps++;
1599 goto again;
1600 }
1601 } while (q!=(struct qdio_q*)tiq_list);
1602#endif /* QDIO_USE_PROCESSING_STATE */
1603}
1604
1605static void
1606tiqdio_tl(unsigned long data)
1607{
1608 QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
1609
1610 qdio_perf_stat_inc(&perf_stats.tl_runs);
1611
1612 tiqdio_inbound_checks();
1613}
1614
1615/********************* GENERAL HELPER_ROUTINES ***********************/
1616
1617static void
1618qdio_release_irq_memory(struct qdio_irq *irq_ptr)
1619{
1620 int i;
1621 struct qdio_q *q;
1622
1623 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
1624 q = irq_ptr->input_qs[i];
1625 if (q) {
1626 free_page((unsigned long) q->slib);
1627 kmem_cache_free(qdio_q_cache, q);
1628 }
1629 q = irq_ptr->output_qs[i];
1630 if (q) {
1631 free_page((unsigned long) q->slib);
1632 kmem_cache_free(qdio_q_cache, q);
1633 }
1634 }
1635 free_page((unsigned long) irq_ptr->qdr);
1636 free_page((unsigned long) irq_ptr);
1637}
1638
1639static void
1640qdio_set_impl_params(struct qdio_irq *irq_ptr,
1641 unsigned int qib_param_field_format,
1642 /* pointer to 128 bytes or NULL, if no param field */
1643 unsigned char *qib_param_field,
1644 /* pointer to no_queues*128 words of data or NULL */
1645 unsigned int no_input_qs,
1646 unsigned int no_output_qs,
1647 unsigned long *input_slib_elements,
1648 unsigned long *output_slib_elements)
1649{
1650 int i,j;
1651
1652 if (!irq_ptr)
1653 return;
1654
1655 irq_ptr->qib.pfmt=qib_param_field_format;
1656 if (qib_param_field)
1657 memcpy(irq_ptr->qib.parm,qib_param_field,
1658 QDIO_MAX_BUFFERS_PER_Q);
1659
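	/*
	 * the slib element arrays are flat: entry i*QDIO_MAX_BUFFERS_PER_Q+j
	 * holds the parameters for buffer j of queue i
	 */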
1660 if (input_slib_elements)
1661 for (i=0;i<no_input_qs;i++) {
1662 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1663 irq_ptr->input_qs[i]->slib->slibe[j].parms=
1664 input_slib_elements[
1665 i*QDIO_MAX_BUFFERS_PER_Q+j];
1666 }
1667 if (output_slib_elements)
1668 for (i=0;i<no_output_qs;i++) {
1669 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1670 irq_ptr->output_qs[i]->slib->slibe[j].parms=
1671 output_slib_elements[
1672 i*QDIO_MAX_BUFFERS_PER_Q+j];
1673 }
1674}
1675
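/*
 * allocate the per-queue structures and their SLIB pages. On failure the
 * queues allocated so far stay in irq_ptr and are freed by the caller
 * via qdio_release_irq_memory().
 */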
1676static int
1677qdio_alloc_qs(struct qdio_irq *irq_ptr,
1678 int no_input_qs, int no_output_qs)
1679{
1680 int i;
1681 struct qdio_q *q;
1682
1683 for (i = 0; i < no_input_qs; i++) {
1684 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
1685 if (!q)
1686 return -ENOMEM;
1687 memset(q, 0, sizeof(*q));
1688
1689 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
1690 if (!q->slib) {
1691 kmem_cache_free(qdio_q_cache, q);
1692 return -ENOMEM;
1693 }
1694 irq_ptr->input_qs[i]=q;
1695 }
1696
1697 for (i = 0; i < no_output_qs; i++) {
1698 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
1699 if (!q)
1700 return -ENOMEM;
1701 memset(q, 0, sizeof(*q));
1702
1703 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
1704 if (!q->slib) {
1705 kmem_cache_free(qdio_q_cache, q);
1706 return -ENOMEM;
1707 }
1708 irq_ptr->output_qs[i]=q;
1709 }
1710 return 0;
1711}
1712
1713static void
1714qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1715 int no_input_qs, int no_output_qs,
1716 qdio_handler_t *input_handler,
1717 qdio_handler_t *output_handler,
1718 unsigned long int_parm,int q_format,
1719 unsigned long flags,
1720 void **inbound_sbals_array,
1721 void **outbound_sbals_array)
1722{
1723 struct qdio_q *q;
1724 int i,j;
1725 char dbf_text[20]; /* see qdio_initialize */
1726 void *ptr;
1727 int available;
1728
1729 sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
1730 QDIO_DBF_TEXT0(0,setup,dbf_text);
1731 for (i=0;i<no_input_qs;i++) {
1732 q=irq_ptr->input_qs[i];
1733
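		/*
		 * zero only the head of the structure: q->slib was
		 * allocated in qdio_alloc_qs() and must survive. The
		 * SLIB occupies the first half of its page, the SL the
		 * second half (set up below).
		 */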
1734 memset(q,0,((char*)&q->slib)-((char*)q));
1735 sprintf(dbf_text,"in-q%4x",i);
1736 QDIO_DBF_TEXT0(0,setup,dbf_text);
1737 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1738
1739 memset(q->slib,0,PAGE_SIZE);
1740 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1741
1742 available=0;
1743
1744 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1745 q->sbal[j]=*(inbound_sbals_array++);
1746
1747 q->queue_type=q_format;
1748 q->int_parm=int_parm;
1749 q->schid = irq_ptr->schid;
1750 q->irq_ptr = irq_ptr;
1751 q->cdev = cdev;
1752 q->mask=1<<(31-i);
1753 q->q_no=i;
1754 q->is_input_q=1;
1755 q->first_to_check=0;
1756 q->last_move_ftc=0;
1757 q->handler=input_handler;
1758 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
1759
1760 /* q->is_thinint_q isn't valid at this time, but
1761 * irq_ptr->is_thinint_irq is
1762 */
1763 if (irq_ptr->is_thinint_irq)
1764 tasklet_init(&q->tasklet, tiqdio_inbound_processing,
1765 (unsigned long) q);
1766 else
1767 tasklet_init(&q->tasklet, qdio_inbound_processing,
1768 (unsigned long) q);
1769
1770 /* actually this is not used for inbound queues. yet. */
1771 atomic_set(&q->busy_siga_counter,0);
1772 q->timing.busy_start=0;
1773
1774/* for (j=0;j<QDIO_STATS_NUMBER;j++)
1775 q->timing.last_transfer_times[j]=(qdio_get_micros()/
1776 QDIO_STATS_NUMBER)*j;
1777 q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
1778*/
1779
1780 /* fill in slib */
1781 if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
1782 (unsigned long)(q->slib);
1783 q->slib->sla=(unsigned long)(q->sl);
1784 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1785
1786 /* fill in sl */
1787 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1788 q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1789
1790 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1791 ptr=(void*)q->sl;
1792 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1793 ptr=(void*)&q->slsb;
1794 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1795 ptr=(void*)q->sbal[0];
1796 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1797
1798 /* fill in slsb */
1799 if (!irq_ptr->is_qebsm) {
1800 unsigned int count = 1;
1801 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1802 set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
1803 }
1804 }
1805
1806 for (i=0;i<no_output_qs;i++) {
1807 q=irq_ptr->output_qs[i];
1808 memset(q,0,((char*)&q->slib)-((char*)q));
1809
1810 sprintf(dbf_text,"outq%4x",i);
1811 QDIO_DBF_TEXT0(0,setup,dbf_text);
1812 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1813
1814 memset(q->slib,0,PAGE_SIZE);
1815 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1816
1817 available=0;
1818
1819 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1820 q->sbal[j]=*(outbound_sbals_array++);
1821
1822 q->queue_type=q_format;
1823 if ((q->queue_type == QDIO_IQDIO_QFMT) &&
1824 (no_output_qs > 1) &&
1825 (i == no_output_qs-1))
1826 q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
1827 q->int_parm=int_parm;
1828 q->is_input_q=0;
1829 q->is_pci_out = 0;
1830 q->schid = irq_ptr->schid;
1831 q->cdev = cdev;
1832 q->irq_ptr = irq_ptr;
1833 q->mask=1<<(31-i);
1834 q->q_no=i;
1835 q->first_to_check=0;
1836 q->last_move_ftc=0;
1837 q->handler=output_handler;
1838
1839 tasklet_init(&q->tasklet, qdio_outbound_processing,
1840 (unsigned long) q);
1841 setup_timer(&q->timer, qdio_outbound_processing,
1842 (unsigned long) q);
1843
1844 atomic_set(&q->busy_siga_counter,0);
1845 q->timing.busy_start=0;
1846
1847 /* fill in slib */
1848 if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
1849 (unsigned long)(q->slib);
1850 q->slib->sla=(unsigned long)(q->sl);
1851 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1852
1853 /* fill in sl */
1854 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1855 q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1856
1857 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1858 ptr=(void*)q->sl;
1859 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1860 ptr=(void*)&q->slsb;
1861 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1862 ptr=(void*)q->sbal[0];
1863 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1864
1865 /* fill in slsb */
1866 if (!irq_ptr->is_qebsm) {
1867 unsigned int count = 1;
1868 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1869 set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
1870 }
1871 }
1872}
1873
1874static void
1875qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1876 unsigned int no_input_qs,
1877 unsigned int no_output_qs,
1878 unsigned int min_input_threshold,
1879 unsigned int max_input_threshold,
1880 unsigned int min_output_threshold,
1881 unsigned int max_output_threshold)
1882{
1883 int i;
1884 struct qdio_q *q;
1885
1886 for (i=0;i<no_input_qs;i++) {
1887 q=irq_ptr->input_qs[i];
1888 q->timing.threshold=max_input_threshold;
1889/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1890 q->threshold_classes[j].threshold=
1891 min_input_threshold+
1892 (max_input_threshold-min_input_threshold)/
1893 QDIO_STATS_CLASSES;
1894 }
1895 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1896 }
1897 for (i=0;i<no_output_qs;i++) {
1898 q=irq_ptr->output_qs[i];
1899 q->timing.threshold=max_output_threshold;
1900/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1901 q->threshold_classes[j].threshold=
1902 min_output_threshold+
1903 (max_output_threshold-min_output_threshold)/
1904 QDIO_STATS_CLASSES;
1905 }
1906 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1907 }
1908}
1909
1910static void tiqdio_thinint_handler(void *ind, void *drv_data)
1911{
1912 QDIO_DBF_TEXT4(0,trace,"thin_int");
1913
1914 qdio_perf_stat_inc(&perf_stats.thinints);
1915
1916 /* SVS only when needed:
1917 * issue SVS to benefit from iqdio interrupt avoidance
1918	 * (SVS clears AISOI) */
1919 if (!omit_svs)
1920 tiqdio_clear_global_summary();
1921
1922 tiqdio_inbound_checks();
1923}
1924
1925static void
1926qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1927{
1928 int i;
1929#ifdef CONFIG_QDIO_DEBUG
1930 char dbf_text[15];
1931
1932 QDIO_DBF_TEXT5(0,trace,"newstate");
1933 sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
1934 QDIO_DBF_TEXT5(0,trace,dbf_text);
1935#endif /* CONFIG_QDIO_DEBUG */
1936
1937 irq_ptr->state=state;
1938 for (i=0;i<irq_ptr->no_input_qs;i++)
1939 irq_ptr->input_qs[i]->state=state;
1940 for (i=0;i<irq_ptr->no_output_qs;i++)
1941 irq_ptr->output_qs[i]->state=state;
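	/* make the state change visible to all CPUs */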
1942 mb();
1943}
1944
1945static void
1946qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1947{
1948 char dbf_text[15];
1949
1950 if (irb->esw.esw0.erw.cons) {
1951 sprintf(dbf_text,"sens%4x",schid.sch_no);
1952 QDIO_DBF_TEXT2(1,trace,dbf_text);
1953 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1954
1955 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1956 QDIO_HEXDUMP16(WARN,"irb: ",irb);
1957 QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
1958 }
1959
1960}
1961
1962static void
1963qdio_handle_pci(struct qdio_irq *irq_ptr)
1964{
1965 int i;
1966 struct qdio_q *q;
1967
1968 qdio_perf_stat_inc(&perf_stats.pcis);
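	/*
	 * inbound queues flagged for no-interrupt-context processing are
	 * only marked for the tasklet; all others are scanned right here
	 */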
1969 for (i=0;i<irq_ptr->no_input_qs;i++) {
1970 q=irq_ptr->input_qs[i];
1971 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1972 qdio_mark_q(q);
1973 else {
1974 qdio_perf_stat_dec(&perf_stats.tl_runs);
1975 __qdio_inbound_processing(q);
1976 }
1977 }
1978 if (!irq_ptr->hydra_gives_outbound_pcis)
1979 return;
1980 for (i=0;i<irq_ptr->no_output_qs;i++) {
1981 q=irq_ptr->output_qs[i];
1982 if (qdio_is_outbound_q_done(q))
1983 continue;
1984 qdio_perf_stat_dec(&perf_stats.tl_runs);
1985 if (!irq_ptr->sync_done_on_outb_pcis)
1986 SYNC_MEMORY;
1987 __qdio_outbound_processing(q);
1988 }
1989}
1990
1991static void qdio_establish_handle_irq(struct ccw_device*, int, int);
1992
1993static void
1994qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
1995 int cstat, int dstat)
1996{
1997 struct qdio_irq *irq_ptr;
1998 struct qdio_q *q;
1999 char dbf_text[15];
2000
2001 irq_ptr = cdev->private->qdio_data;
2002
2003 QDIO_DBF_TEXT2(1, trace, "ick2");
2004 sprintf(dbf_text,"%s", cdev->dev.bus_id);
2005 QDIO_DBF_TEXT2(1,trace,dbf_text);
2006 QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
2007 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2008 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2009 QDIO_PRINT_ERR("received check condition on activate " \
2010 "queues on device %s (cs=x%x, ds=x%x).\n",
2011 cdev->dev.bus_id, cstat, dstat);
2012 if (irq_ptr->no_input_qs) {
2013 q=irq_ptr->input_qs[0];
2014 } else if (irq_ptr->no_output_qs) {
2015 q=irq_ptr->output_qs[0];
2016 } else {
2017 QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
2018 cdev->dev.bus_id);
2019 goto omit_handler_call;
2020 }
2021 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
2022 QDIO_STATUS_LOOK_FOR_ERROR,
2023 0,0,0,-1,-1,q->int_parm);
2024omit_handler_call:
2025 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
2026
2027}
2028
2029static void
2030qdio_call_shutdown(struct work_struct *work)
2031{
2032 struct ccw_device_private *priv;
2033 struct ccw_device *cdev;
2034
2035 priv = container_of(work, struct ccw_device_private, kick_work);
2036 cdev = priv->cdev;
2037 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2038 put_device(&cdev->dev);
2039}
2040
2041static void
2042qdio_timeout_handler(struct ccw_device *cdev)
2043{
2044 struct qdio_irq *irq_ptr;
2045 char dbf_text[15];
2046
2047 QDIO_DBF_TEXT2(0, trace, "qtoh");
2048 sprintf(dbf_text, "%s", cdev->dev.bus_id);
2049 QDIO_DBF_TEXT2(0, trace, dbf_text);
2050
2051 irq_ptr = cdev->private->qdio_data;
2052 sprintf(dbf_text, "state:%d", irq_ptr->state);
2053 QDIO_DBF_TEXT2(0, trace, dbf_text);
2054
2055 switch (irq_ptr->state) {
2056 case QDIO_IRQ_STATE_INACTIVE:
2057 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
2058 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2059 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
2060 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2061 break;
2062 case QDIO_IRQ_STATE_CLEANUP:
2063 QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
2064 "irq=0.%x.%x.\n",
2065 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2066 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2067 break;
2068 case QDIO_IRQ_STATE_ESTABLISHED:
2069 case QDIO_IRQ_STATE_ACTIVE:
2070 /* I/O has been terminated by common I/O layer. */
2071 QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
2072 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2073 QDIO_DBF_TEXT2(1, trace, "cio:term");
2074 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
2075 if (get_device(&cdev->dev)) {
2076 /* Can't call shutdown from interrupt context. */
2077 PREPARE_WORK(&cdev->private->kick_work,
2078 qdio_call_shutdown);
2079 queue_work(ccw_device_work, &cdev->private->kick_work);
2080 }
2081 break;
2082 default:
2083 BUG();
2084 }
2085 ccw_device_set_timeout(cdev, 0);
2086 wake_up(&cdev->private->wait_q);
2087}
2088
2089static void
2090qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2091{
2092 struct qdio_irq *irq_ptr;
2093 int cstat,dstat;
2094 char dbf_text[15];
2095
2096#ifdef CONFIG_QDIO_DEBUG
2097 QDIO_DBF_TEXT4(0, trace, "qint");
2098 sprintf(dbf_text, "%s", cdev->dev.bus_id);
2099 QDIO_DBF_TEXT4(0, trace, dbf_text);
2100#endif /* CONFIG_QDIO_DEBUG */
2101
2102 if (!intparm) {
2103 QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
2104 "handler, device %s\n", cdev->dev.bus_id);
2105 return;
2106 }
2107
2108 irq_ptr = cdev->private->qdio_data;
2109 if (!irq_ptr) {
2110 QDIO_DBF_TEXT2(1, trace, "uint");
2111 sprintf(dbf_text,"%s", cdev->dev.bus_id);
2112 QDIO_DBF_TEXT2(1,trace,dbf_text);
2113 QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
2114 cdev->dev.bus_id);
2115 return;
2116 }
2117
2118 if (IS_ERR(irb)) {
2119 /* Currently running i/o is in error. */
2120 switch (PTR_ERR(irb)) {
2121 case -EIO:
2122 QDIO_PRINT_ERR("i/o error on device %s\n",
2123 cdev->dev.bus_id);
2124 return;
2125 case -ETIMEDOUT:
2126 qdio_timeout_handler(cdev);
2127 return;
2128 default:
2129 QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
2130 PTR_ERR(irb), cdev->dev.bus_id);
2131 return;
2132 }
2133 }
2134
2135 qdio_irq_check_sense(irq_ptr->schid, irb);
2136
2137#ifdef CONFIG_QDIO_DEBUG
2138 sprintf(dbf_text, "state:%d", irq_ptr->state);
2139 QDIO_DBF_TEXT4(0, trace, dbf_text);
2140#endif /* CONFIG_QDIO_DEBUG */
2141
2142 cstat = irb->scsw.cstat;
2143 dstat = irb->scsw.dstat;
2144
2145 switch (irq_ptr->state) {
2146 case QDIO_IRQ_STATE_INACTIVE:
2147 qdio_establish_handle_irq(cdev, cstat, dstat);
2148 break;
2149
2150 case QDIO_IRQ_STATE_CLEANUP:
2151 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2152 break;
2153
2154 case QDIO_IRQ_STATE_ESTABLISHED:
2155 case QDIO_IRQ_STATE_ACTIVE:
2156 if (cstat & SCHN_STAT_PCI) {
2157 qdio_handle_pci(irq_ptr);
2158 break;
2159 }
2160
2161 if ((cstat&~SCHN_STAT_PCI)||dstat) {
2162 qdio_handle_activate_check(cdev, intparm, cstat, dstat);
2163 break;
2164 }
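		/* intentional fall-through: anything else is unexpected */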
2165 default:
2166 QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
2167 "device %s?!\n",
2168 irq_ptr->state, cdev->dev.bus_id);
2169 }
2170 wake_up(&cdev->private->wait_q);
2171
2172}
2173
2174int
2175qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2176 unsigned int queue_number)
2177{
2178 int cc = 0;
2179 struct qdio_q *q;
2180 struct qdio_irq *irq_ptr;
2181 void *ptr;
2182#ifdef CONFIG_QDIO_DEBUG
2183 char dbf_text[15]="SyncXXXX";
2184#endif
2185
2186 irq_ptr = cdev->private->qdio_data;
2187 if (!irq_ptr)
2188 return -ENODEV;
2189
2190#ifdef CONFIG_QDIO_DEBUG
2191 *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
2192 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2193 *((int*)(&dbf_text[0]))=flags;
2194 *((int*)(&dbf_text[4]))=queue_number;
2195 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2196#endif /* CONFIG_QDIO_DEBUG */
2197
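	/* QEBSM subchannels need no SIGA sync; cc simply stays 0 then */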
2198 if (flags&QDIO_FLAG_SYNC_INPUT) {
2199 q=irq_ptr->input_qs[queue_number];
2200 if (!q)
2201 return -EINVAL;
2202 if (!(irq_ptr->is_qebsm))
2203 cc = do_siga_sync(q->schid, 0, q->mask);
2204 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
2205 q=irq_ptr->output_qs[queue_number];
2206 if (!q)
2207 return -EINVAL;
2208 if (!(irq_ptr->is_qebsm))
2209 cc = do_siga_sync(q->schid, q->mask, 0);
2210 } else
2211 return -EINVAL;
2212
2213 ptr=&cc;
2214 if (cc)
2215 QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
2216
2217 return cc;
2218}
2219
2220static int
2221qdio_get_ssqd_information(struct subchannel_id *schid,
2222 struct qdio_chsc_ssqd **ssqd_area)
2223{
2224 int result;
2225
2226 QDIO_DBF_TEXT0(0, setup, "getssqd");
2227 *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2228	if (!*ssqd_area) {
2229 QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
2230 schid->sch_no);
2231 return -ENOMEM;
2232 }
2233
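	/* CHSC command 0x0024: store subchannel QDIO data (SSQD) */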
2234 (*ssqd_area)->request = (struct chsc_header) {
2235 .length = 0x0010,
2236 .code = 0x0024,
2237 };
2238 (*ssqd_area)->first_sch = schid->sch_no;
2239 (*ssqd_area)->last_sch = schid->sch_no;
2240 (*ssqd_area)->ssid = schid->ssid;
2241 result = chsc(*ssqd_area);
2242
2243 if (result) {
2244 QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
2245 result, schid->ssid, schid->sch_no);
2246 goto out;
2247 }
2248
2249 if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2250 QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
2251 (*ssqd_area)->response.code,
2252 schid->ssid, schid->sch_no);
2253 goto out;
2254 }
2255 if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2256 !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
2257 ((*ssqd_area)->sch != schid->sch_no)) {
2258 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2259 "using all SIGAs.\n",
2260 schid->ssid, schid->sch_no);
2261 goto out;
2262 }
2263 return 0;
2264out:
2265 return -EINVAL;
2266}
2267
2268int
2269qdio_get_ssqd_pct(struct ccw_device *cdev)
2270{
2271 struct qdio_chsc_ssqd *ssqd_area;
2272 struct subchannel_id schid;
2273 char dbf_text[15];
2274 int rc;
2275 int pct = 0;
2276
2277 QDIO_DBF_TEXT0(0, setup, "getpct");
2278 schid = ccw_device_get_subchannel_id(cdev);
2279 rc = qdio_get_ssqd_information(&schid, &ssqd_area);
2280 if (!rc)
2281 pct = (int)ssqd_area->pct;
2282 if (rc != -ENOMEM)
2283 mempool_free(ssqd_area, qdio_mempool_scssc);
2284 sprintf(dbf_text, "pct: %d", pct);
2285 QDIO_DBF_TEXT2(0, setup, dbf_text);
2286 return pct;
2287}
2288EXPORT_SYMBOL(qdio_get_ssqd_pct);
2289
2290static void
2291qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
2292{
2293 struct qdio_q *q;
2294 int i;
2295 unsigned int count, start_buf;
2296 char dbf_text[15];
2297
2298	/* check if QEBSM is disabled */
2299 if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
2300 irq_ptr->is_qebsm = 0;
2301 irq_ptr->sch_token = 0;
2302 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2303 QDIO_DBF_TEXT0(0,setup,"noV=V");
2304 return;
2305 }
2306 irq_ptr->sch_token = token;
2307	/* input queues */
2308 for (i = 0; i < irq_ptr->no_input_qs;i++) {
2309 q = irq_ptr->input_qs[i];
2310 count = QDIO_MAX_BUFFERS_PER_Q;
2311 start_buf = 0;
2312 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2313 }
2314 sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2315 QDIO_DBF_TEXT0(0,setup,dbf_text);
2316 sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2317 QDIO_DBF_TEXT0(0,setup,dbf_text);
2318	/* output queues */
2319 for (i = 0; i < irq_ptr->no_output_qs; i++) {
2320 q = irq_ptr->output_qs[i];
2321 count = QDIO_MAX_BUFFERS_PER_Q;
2322 start_buf = 0;
2323 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2324 }
2325}
2326
2327static void
2328qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
2329{
2330 int rc;
2331 struct qdio_chsc_ssqd *ssqd_area;
2332
2333 QDIO_DBF_TEXT0(0,setup,"getssqd");
2334 irq_ptr->qdioac = 0;
2335 rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
2336 if (rc) {
2337		QDIO_PRINT_WARN("using all SIGAs for sch x%x.\n",
2338 irq_ptr->schid.sch_no);
2339 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2340 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2341 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2342 irq_ptr->is_qebsm = 0;
2343 } else
2344 irq_ptr->qdioac = ssqd_area->qdioac1;
2345
2346	qdio_check_subchannel_qebsm(irq_ptr, rc ? 0 : ssqd_area->sch_token); /* ssqd_area may be invalid on error */
2347 if (rc != -ENOMEM)
2348 mempool_free(ssqd_area, qdio_mempool_scssc);
2349}
2350
2351static unsigned int
2352tiqdio_check_chsc_availability(void)
2353{
2354 char dbf_text[15];
2355
2356 if (!css_characteristics_avail)
2357 return -EIO;
2358
2359 /* Check for bit 41. */
2360 if (!css_general_characteristics.aif) {
2361 QDIO_PRINT_WARN("Adapter interruption facility not " \
2362 "installed.\n");
2363 return -ENOENT;
2364 }
2365
2366 /* Check for bits 107 and 108. */
2367 if (!css_chsc_characteristics.scssc ||
2368 !css_chsc_characteristics.scsscf) {
2369 QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
2370 "not available.\n");
2371 return -ENOENT;
2372 }
2373
2374 /* Check for OSA/FCP thin interrupts (bit 67). */
2375 hydra_thinints = css_general_characteristics.aif_osa;
2376 sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2377 QDIO_DBF_TEXT0(0,setup,dbf_text);
2378
2379#ifdef CONFIG_64BIT
2380 /* Check for QEBSM support in general (bit 58). */
2381 is_passthrough = css_general_characteristics.qebsm;
2382#endif
2383 sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2384 QDIO_DBF_TEXT0(0,setup,dbf_text);
2385
2386 /* Check for aif time delay disablement fac (bit 56). If installed,
2387 * omit svs even under lpar (good point by rick again) */
2388 omit_svs = css_general_characteristics.aif_tdd;
2389 sprintf(dbf_text,"omitsvs%1x", omit_svs);
2390 QDIO_DBF_TEXT0(0,setup,dbf_text);
2391 return 0;
2392}
2393
2394
2395static unsigned int
2396tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2397{
2398 unsigned long real_addr_local_summary_bit;
2399 unsigned long real_addr_dev_st_chg_ind;
2400 void *ptr;
2401 char dbf_text[15];
2402
2403 unsigned int resp_code;
2404 int result;
2405
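	/*
	 * CHSC command block for "set channel subsystem characteristics"
	 * (command code 0x0021): request header and payload are padded
	 * out to 0x0fe0 bytes, followed by the response header
	 */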
2406 struct {
2407 struct chsc_header request;
2408 u16 operation_code;
2409 u16 reserved1;
2410 u32 reserved2;
2411 u32 reserved3;
2412 u64 summary_indicator_addr;
2413 u64 subchannel_indicator_addr;
2414 u32 ks:4;
2415 u32 kc:4;
2416 u32 reserved4:21;
2417 u32 isc:3;
2418 u32 word_with_d_bit;
2419 /* set to 0x10000000 to enable
2420 * time delay disablement facility */
2421 u32 reserved5;
2422 struct subchannel_id schid;
2423 u32 reserved6[1004];
2424 struct chsc_header response;
2425 u32 reserved7;
2426 } *scssc_area;
2427
2428 if (!irq_ptr->is_thinint_irq)
2429 return -ENODEV;
2430
2431 if (reset_to_zero) {
2432 real_addr_local_summary_bit=0;
2433 real_addr_dev_st_chg_ind=0;
2434 } else {
2435 real_addr_local_summary_bit=
2436 virt_to_phys((volatile void *)tiqdio_ind);
2437 real_addr_dev_st_chg_ind=
2438 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2439 }
2440
2441 scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2442 if (!scssc_area) {
2443 QDIO_PRINT_WARN("No memory for setting indicators on " \
2444 "subchannel 0.%x.%x.\n",
2445 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2446 return -ENOMEM;
2447 }
2448 scssc_area->request = (struct chsc_header) {
2449 .length = 0x0fe0,
2450 .code = 0x0021,
2451 };
2452 scssc_area->operation_code = 0;
2453
2454 scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
2455 scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
2456 scssc_area->ks = QDIO_STORAGE_KEY;
2457 scssc_area->kc = QDIO_STORAGE_KEY;
2458 scssc_area->isc = TIQDIO_THININT_ISC;
2459 scssc_area->schid = irq_ptr->schid;
2460	/* enable the time delay disablement facility if its
2461	 * characteristics bit is set; whether the facility actually
2462	 * works is not verified here */
2463 if (css_general_characteristics.aif_tdd)
2464 scssc_area->word_with_d_bit = 0x10000000;
2465 else
2466 QDIO_PRINT_WARN("Time delay disablement facility " \
2467 "not available\n");
2468
2469 result = chsc(scssc_area);
2470 if (result) {
2471 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
2472 "cc=%i.\n",
2473 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
2474 result = -EIO;
2475 goto out;
2476 }
2477
2478 resp_code = scssc_area->response.code;
2479 if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2480 QDIO_PRINT_WARN("response upon setting indicators " \
2481 "is 0x%x.\n",resp_code);
2482 sprintf(dbf_text,"sidR%4x",resp_code);
2483 QDIO_DBF_TEXT1(0,trace,dbf_text);
2484 QDIO_DBF_TEXT1(0,setup,dbf_text);
2485 ptr=&scssc_area->response;
2486 QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
2487 result = -EIO;
2488 goto out;
2489 }
2490
2491 QDIO_DBF_TEXT2(0,setup,"setscind");
2492 QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
2493 sizeof(unsigned long));
2494 QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
2495 result = 0;
2496out:
2497 mempool_free(scssc_area, qdio_mempool_scssc);
2498 return result;
2499
2500}
2501
2502static unsigned int
2503tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2504{
2505 unsigned int resp_code;
2506 int result;
2507 void *ptr;
2508 char dbf_text[15];
2509
2510 struct {
2511 struct chsc_header request;
2512 u16 operation_code;
2513 u16 reserved1;
2514 u32 reserved2;
2515 u32 reserved3;
2516 u32 reserved4[2];
2517 u32 delay_target;
2518 u32 reserved5[1009];
2519 struct chsc_header response;
2520 u32 reserved6;
2521 } *scsscf_area;
2522
2523 if (!irq_ptr->is_thinint_irq)
2524 return -ENODEV;
2525
2526 scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2527 if (!scsscf_area) {
2528 QDIO_PRINT_WARN("No memory for setting delay target on " \
2529 "subchannel 0.%x.%x.\n",
2530 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2531 return -ENOMEM;
2532 }
2533 scsscf_area->request = (struct chsc_header) {
2534 .length = 0x0fe0,
2535 .code = 0x1027,
2536 };
2537
2538 scsscf_area->delay_target = delay_target<<16;
2539
2540 result=chsc(scsscf_area);
2541 if (result) {
2542 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
2543 "cc=%i. Continuing.\n",
2544 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2545 result);
2546 result = -EIO;
2547 goto out;
2548 }
2549
2550 resp_code = scsscf_area->response.code;
2551 if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2552 QDIO_PRINT_WARN("response upon setting delay target " \
2553 "is 0x%x. Continuing.\n",resp_code);
2554 sprintf(dbf_text,"sdtR%4x",resp_code);
2555 QDIO_DBF_TEXT1(0,trace,dbf_text);
2556 QDIO_DBF_TEXT1(0,setup,dbf_text);
2557 ptr=&scsscf_area->response;
2558 QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
2559 }
2560 QDIO_DBF_TEXT2(0,trace,"delytrgt");
2561 QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
2562 result = 0; /* not critical */
2563out:
2564 mempool_free(scsscf_area, qdio_mempool_scssc);
2565 return result;
2566}
2567
2568int
2569qdio_cleanup(struct ccw_device *cdev, int how)
2570{
2571 struct qdio_irq *irq_ptr;
2572 char dbf_text[15];
2573 int rc;
2574
2575 irq_ptr = cdev->private->qdio_data;
2576 if (!irq_ptr)
2577 return -ENODEV;
2578
2579 sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2580 QDIO_DBF_TEXT1(0,trace,dbf_text);
2581 QDIO_DBF_TEXT0(0,setup,dbf_text);
2582
2583 rc = qdio_shutdown(cdev, how);
2584 if ((rc == 0) || (rc == -EINPROGRESS))
2585 rc = qdio_free(cdev);
2586 return rc;
2587}
2588
2589int
2590qdio_shutdown(struct ccw_device *cdev, int how)
2591{
2592 struct qdio_irq *irq_ptr;
2593 int i;
2594 int result = 0;
2595 int rc;
2596 unsigned long flags;
2597 int timeout;
2598 char dbf_text[15];
2599
2600 irq_ptr = cdev->private->qdio_data;
2601 if (!irq_ptr)
2602 return -ENODEV;
2603
2604 down(&irq_ptr->setting_up_sema);
2605
2606 sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2607 QDIO_DBF_TEXT1(0,trace,dbf_text);
2608 QDIO_DBF_TEXT0(0,setup,dbf_text);
2609
2610 /* mark all qs as uninteresting */
2611 for (i=0;i<irq_ptr->no_input_qs;i++)
2612 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
2613
2614 for (i=0;i<irq_ptr->no_output_qs;i++)
2615 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
2616
2617 tasklet_kill(&tiqdio_tasklet);
2618
2619 for (i=0;i<irq_ptr->no_input_qs;i++) {
2620 qdio_unmark_q(irq_ptr->input_qs[i]);
2621 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
2622 wait_event_interruptible_timeout(cdev->private->wait_q,
2623 !atomic_read(&irq_ptr->
2624 input_qs[i]->
2625 use_count),
2626 QDIO_NO_USE_COUNT_TIMEOUT);
2627 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
2628 result=-EINPROGRESS;
2629 }
2630
2631 for (i=0;i<irq_ptr->no_output_qs;i++) {
2632 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
2633 del_timer(&irq_ptr->output_qs[i]->timer);
2634 wait_event_interruptible_timeout(cdev->private->wait_q,
2635 !atomic_read(&irq_ptr->
2636 output_qs[i]->
2637 use_count),
2638 QDIO_NO_USE_COUNT_TIMEOUT);
2639 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
2640 result=-EINPROGRESS;
2641 }
2642
2643 /* cleanup subchannel */
2644 spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
2645 if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
2646 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
2647 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
2648 } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
2649 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2650 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2651 } else { /* default behaviour */
2652 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2653 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2654 }
2655 if (rc == -ENODEV) {
2656 /* No need to wait for device no longer present. */
2657 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2658 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2659 } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
2660 /*
2661 * Whoever put another handler there, has to cope with the
2662		 * interrupt themselves. Might happen if qdio_shutdown was
2663 * called on already shutdown queues, but this shouldn't have
2664 * bad side effects.
2665 */
2666 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2667 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2668 } else if (rc == 0) {
2669 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2670 ccw_device_set_timeout(cdev, timeout);
2671 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2672
2673 wait_event(cdev->private->wait_q,
2674 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2675 irq_ptr->state == QDIO_IRQ_STATE_ERR);
2676 } else {
2677 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2678				"device %s\n", rc, cdev->dev.bus_id);
2679 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2680 result = rc;
2681 goto out;
2682 }
2683 if (irq_ptr->is_thinint_irq) {
2684 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
2685 tiqdio_set_subchannel_ind(irq_ptr,1);
2686 /* reset adapter interrupt indicators */
2687 }
2688
2689 /* exchange int handlers, if necessary */
2690 if ((void*)cdev->handler == (void*)qdio_handler)
2691 cdev->handler=irq_ptr->original_int_handler;
2692
2693 /* Ignore errors. */
2694 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2695 ccw_device_set_timeout(cdev, 0);
2696out:
2697 up(&irq_ptr->setting_up_sema);
2698 return result;
2699}
2700
2701int
2702qdio_free(struct ccw_device *cdev)
2703{
2704 struct qdio_irq *irq_ptr;
2705 char dbf_text[15];
2706
2707 irq_ptr = cdev->private->qdio_data;
2708 if (!irq_ptr)
2709 return -ENODEV;
2710
2711 down(&irq_ptr->setting_up_sema);
2712
2713 sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2714 QDIO_DBF_TEXT1(0,trace,dbf_text);
2715 QDIO_DBF_TEXT0(0,setup,dbf_text);
2716
2717 cdev->private->qdio_data = NULL;
2718
2719 up(&irq_ptr->setting_up_sema);
2720
2721 qdio_release_irq_memory(irq_ptr);
2722 module_put(THIS_MODULE);
2723 return 0;
2724}
2725
2726static void
2727qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2728{
2729	char dbf_text[20]; /* room in case a sprintf emits more than 8 chars */
2730
2731 sprintf(dbf_text,"qfmt:%x",init_data->q_format);
2732 QDIO_DBF_TEXT0(0,setup,dbf_text);
2733 QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
2734 sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
2735 QDIO_DBF_TEXT0(0,setup,dbf_text);
2736 QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
2737 QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
2738 QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
2739 sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
2740 QDIO_DBF_TEXT0(0,setup,dbf_text);
2741 sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
2742 QDIO_DBF_TEXT0(0,setup,dbf_text);
2743 sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
2744 QDIO_DBF_TEXT0(0,setup,dbf_text);
2745 sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
2746 QDIO_DBF_TEXT0(0,setup,dbf_text);
2747 sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
2748 QDIO_DBF_TEXT0(0,setup,dbf_text);
2749 sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
2750 QDIO_DBF_TEXT0(0,setup,dbf_text);
2751 QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
2752 QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
2753 QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
2754 QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
2755 QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
2756 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2757}
2758
2759static void
2760qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2761{
2762 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
2763 irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2764
2765 irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
2766
2767 irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
2768
2769 irq_ptr->qdr->qdf0[i].slsba=
2770 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
2771
2772 irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
2773 irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
2774 irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
2775 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2776}
2777
2778static void
2779qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2780 int j, int iqfmt)
2781{
2782 irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
2783 irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2784
2785 irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
2786
2787 irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
2788
2789 irq_ptr->qdr->qdf0[i+j].slsba=
2790 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
2791
2792 irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
2793 irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
2794 irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
2795 irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
2796}
2797
2798
2799static void
2800qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2801{
2802 int i;
2803
2804 for (i=0;i<irq_ptr->no_input_qs;i++) {
2805 irq_ptr->input_qs[i]->siga_sync=
2806 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2807 irq_ptr->input_qs[i]->siga_in=
2808 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2809 irq_ptr->input_qs[i]->siga_out=
2810 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2811 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2812 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2813 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2814 irq_ptr->hydra_gives_outbound_pcis;
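		/*
		 * a sync is only implied by an outbound thin interrupt
		 * if the adapter syncs on outbound PCIs and on thin
		 * interrupts
		 */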
2815 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2816 ((irq_ptr->qdioac&
2817 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2818 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2819 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2820 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2821
2822 }
2823}
2824
2825static void
2826qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2827{
2828 int i;
2829
2830 for (i=0;i<irq_ptr->no_output_qs;i++) {
2831 irq_ptr->output_qs[i]->siga_sync=
2832 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2833 irq_ptr->output_qs[i]->siga_in=
2834 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2835 irq_ptr->output_qs[i]->siga_out=
2836 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2837 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
2838 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2839 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
2840 irq_ptr->hydra_gives_outbound_pcis;
2841 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
2842 ((irq_ptr->qdioac&
2843 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2844 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2845 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2846 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2847
2848 }
2849}
2850
2851static int
2852qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2853 int dstat)
2854{
2855 char dbf_text[15];
2856 struct qdio_irq *irq_ptr;
2857
2858 irq_ptr = cdev->private->qdio_data;
2859
2860 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2861 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
2862 QDIO_DBF_TEXT2(1,trace,dbf_text);
2863 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2864 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2865 QDIO_PRINT_ERR("received check condition on establish " \
2866 "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
2867 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2868 cstat,dstat);
2869 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2870 }
2871
2872 if (!(dstat & DEV_STAT_DEV_END)) {
2873 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2874 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2875 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2876 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
2877 "device end: dstat=%02x, cstat=%02x\n",
2878 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2879 dstat, cstat);
2880 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2881 return 1;
2882 }
2883
2884 if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
2885 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2886 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2887 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2888 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
2889 "the following devstat: dstat=%02x, "
2890 "cstat=%02x\n", irq_ptr->schid.ssid,
2891 irq_ptr->schid.sch_no, dstat, cstat);
2892 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2893 return 1;
2894 }
2895 return 0;
2896}
2897
2898static void
2899qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2900{
2901 struct qdio_irq *irq_ptr;
2902 char dbf_text[15];
2903
2904 irq_ptr = cdev->private->qdio_data;
2905
2906 sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
2907 QDIO_DBF_TEXT0(0,setup,dbf_text);
2908 QDIO_DBF_TEXT0(0,trace,dbf_text);
2909
2910 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
2911 ccw_device_set_timeout(cdev, 0);
2912 return;
2913 }
2914
2915 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2916 ccw_device_set_timeout(cdev, 0);
2917}
2918
2919int
2920qdio_initialize(struct qdio_initialize *init_data)
2921{
2922 int rc;
2923 char dbf_text[15];
2924
2925 sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
2926 QDIO_DBF_TEXT0(0,setup,dbf_text);
2927 QDIO_DBF_TEXT0(0,trace,dbf_text);
2928
2929 rc = qdio_allocate(init_data);
2930 if (rc == 0) {
2931 rc = qdio_establish(init_data);
2932 if (rc != 0)
2933 qdio_free(init_data->cdev);
2934 }
2935
2936 return rc;
2937}
2938
2939
2940int
2941qdio_allocate(struct qdio_initialize *init_data)
2942{
2943 struct qdio_irq *irq_ptr;
2944 char dbf_text[15];
2945
2946 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
2947 QDIO_DBF_TEXT0(0,setup,dbf_text);
2948 QDIO_DBF_TEXT0(0,trace,dbf_text);
2949 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2950 (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2951 ((init_data->no_input_qs) && (!init_data->input_handler)) ||
2952 ((init_data->no_output_qs) && (!init_data->output_handler)) )
2953 return -EINVAL;
2954
2955 if (!init_data->input_sbal_addr_array)
2956 return -EINVAL;
2957
2958 if (!init_data->output_sbal_addr_array)
2959 return -EINVAL;
2960
2961 qdio_allocate_do_dbf(init_data);
2962
2963 /* create irq */
2964 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2965
2966 QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2967 QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
2968
2969 if (!irq_ptr) {
2970 QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
2971 return -ENOMEM;
2972 }
2973
2974 init_MUTEX(&irq_ptr->setting_up_sema);
2975
2976 /* QDR must be in DMA area since CCW data address is only 32 bit */
2977 irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
2978 if (!(irq_ptr->qdr)) {
2979 free_page((unsigned long) irq_ptr);
2980 QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
2981 return -ENOMEM;
2982 }
2983 QDIO_DBF_TEXT0(0,setup,"qdr:");
2984 QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
2985
2986 if (qdio_alloc_qs(irq_ptr,
2987 init_data->no_input_qs,
2988 init_data->no_output_qs)) {
2989 QDIO_PRINT_ERR("queue allocation failed!\n");
2990 qdio_release_irq_memory(irq_ptr);
2991 return -ENOMEM;
2992 }
2993
2994 init_data->cdev->private->qdio_data = irq_ptr;
2995
2996 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
2997
2998 return 0;
2999}
3000
3001static int qdio_fill_irq(struct qdio_initialize *init_data)
3002{
3003 int i;
3004 char dbf_text[15];
3005 struct ciw *ciw;
3006 int is_iqdio;
3007 struct qdio_irq *irq_ptr;
3008
3009 irq_ptr = init_data->cdev->private->qdio_data;
3010
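	/*
	 * zero only the part of the irq structure in front of the qdr
	 * pointer; qdr and the queue pointers filled in by qdio_allocate()
	 * live behind it and must survive
	 */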
3011 memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
3012
3013 /* wipes qib.ac, required by ar7063 */
3014 memset(irq_ptr->qdr,0,sizeof(struct qdr));
3015
3016 irq_ptr->int_parm=init_data->int_parm;
3017
3018 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
3019 irq_ptr->no_input_qs=init_data->no_input_qs;
3020 irq_ptr->no_output_qs=init_data->no_output_qs;
3021
3022 if (init_data->q_format==QDIO_IQDIO_QFMT) {
3023 irq_ptr->is_iqdio_irq=1;
3024 irq_ptr->is_thinint_irq=1;
3025 } else {
3026 irq_ptr->is_iqdio_irq=0;
3027 irq_ptr->is_thinint_irq=hydra_thinints;
3028 }
3029 sprintf(dbf_text,"is_i_t%1x%1x",
3030 irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
3031 QDIO_DBF_TEXT2(0,setup,dbf_text);
3032
3033 if (irq_ptr->is_thinint_irq) {
3034 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
3035 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
3036 if (!irq_ptr->dev_st_chg_ind) {
3037 QDIO_PRINT_WARN("no indicator location available " \
3038 "for irq 0.%x.%x\n",
3039 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
3040 qdio_release_irq_memory(irq_ptr);
3041 return -ENOBUFS;
3042 }
3043 }
3044
3045 /* defaults */
3046 irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
3047 irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
3048 irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
3049 irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
3050
3051 qdio_fill_qs(irq_ptr, init_data->cdev,
3052 init_data->no_input_qs,
3053 init_data->no_output_qs,
3054 init_data->input_handler,
3055 init_data->output_handler,init_data->int_parm,
3056 init_data->q_format,init_data->flags,
3057 init_data->input_sbal_addr_array,
3058 init_data->output_sbal_addr_array);
3059
3060 if (!try_module_get(THIS_MODULE)) {
3061 QDIO_PRINT_CRIT("try_module_get() failed!\n");
3062 qdio_release_irq_memory(irq_ptr);
3063 return -EINVAL;
3064 }
3065
3066 qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
3067 init_data->no_output_qs,
3068 init_data->min_input_threshold,
3069 init_data->max_input_threshold,
3070 init_data->min_output_threshold,
3071 init_data->max_output_threshold);
3072
3073 /* fill in qdr */
3074 irq_ptr->qdr->qfmt=init_data->q_format;
3075 irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
3076 irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
3077 irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
3078 irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
3079
3080 irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
3081 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
3082
3083 /* fill in qib */
3084 irq_ptr->is_qebsm = is_passthrough;
3085 if (irq_ptr->is_qebsm)
3086 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
3087
3088 irq_ptr->qib.qfmt=init_data->q_format;
3089 if (init_data->no_input_qs)
3090 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
3091 if (init_data->no_output_qs)
3092 irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
3093 memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
3094
3095 qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
3096 init_data->qib_param_field,
3097 init_data->no_input_qs,
3098 init_data->no_output_qs,
3099 init_data->input_slib_elements,
3100 init_data->output_slib_elements);
3101
3102 /* first input descriptors, then output descriptors */
3103 is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
3104 for (i=0;i<init_data->no_input_qs;i++)
3105 qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
3106
3107 for (i=0;i<init_data->no_output_qs;i++)
3108 qdio_allocate_fill_output_desc(irq_ptr, i,
3109 init_data->no_input_qs,
3110 is_iqdio);
3111
3112 /* qdr, qib, sls, slsbs, slibs, sbales filled. */
3113
3114 /* get qdio commands */
3115 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
3116 if (!ciw) {
3117 QDIO_DBF_TEXT2(1,setup,"no eq");
3118 QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
3119 "Trying to use default.\n");
3120 } else
3121 irq_ptr->equeue = *ciw;
3122 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
3123 if (!ciw) {
3124 QDIO_DBF_TEXT2(1,setup,"no aq");
3125 QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
3126 "Trying to use default.\n");
3127 } else
3128 irq_ptr->aqueue = *ciw;
3129
3130 /* Set new interrupt handler. */
3131 irq_ptr->original_int_handler = init_data->cdev->handler;
3132 init_data->cdev->handler = qdio_handler;
3133
3134 return 0;
3135}
3136
3137int
3138qdio_establish(struct qdio_initialize *init_data)
3139{
3140 struct qdio_irq *irq_ptr;
3141 unsigned long saveflags;
3142 int result, result2;
3143 struct ccw_device *cdev;
3144 char dbf_text[20];
3145
3146 cdev=init_data->cdev;
3147 irq_ptr = cdev->private->qdio_data;
3148 if (!irq_ptr)
3149 return -EINVAL;
3150
3151 if (cdev->private->state != DEV_STATE_ONLINE)
3152 return -EINVAL;
3153
3154 down(&irq_ptr->setting_up_sema);
3155
3156 qdio_fill_irq(init_data);
3157
3158 /* the thinint CHSC stuff */
3159 if (irq_ptr->is_thinint_irq) {
3160
3161 result = tiqdio_set_subchannel_ind(irq_ptr,0);
3162 if (result) {
3163 up(&irq_ptr->setting_up_sema);
3164 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3165 return result;
3166 }
3167 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
3168 }
3169
3170 sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
3171 QDIO_DBF_TEXT0(0,setup,dbf_text);
3172 QDIO_DBF_TEXT0(0,trace,dbf_text);
3173
3174 /* establish q */
3175 irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
3176 irq_ptr->ccw.flags=CCW_FLAG_SLI;
3177 irq_ptr->ccw.count=irq_ptr->equeue.count;
3178 irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
3179
3180 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3181
3182 ccw_device_set_options_mask(cdev, 0);
3183 result = ccw_device_start(cdev, &irq_ptr->ccw,
3184 QDIO_DOING_ESTABLISH, 0, 0);
3185 if (result) {
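		/* retry once before giving up */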
3186 result2 = ccw_device_start(cdev, &irq_ptr->ccw,
3187 QDIO_DOING_ESTABLISH, 0, 0);
3188 sprintf(dbf_text,"eq:io%4x",result);
3189 QDIO_DBF_TEXT2(1,setup,dbf_text);
3190 if (result2) {
3191			sprintf(dbf_text,"eq:io%4x",result2);
3192 QDIO_DBF_TEXT2(1,setup,dbf_text);
3193 }
3194 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
3195 "returned %i, next try returned %i\n",
3196 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3197 result, result2);
3198 result=result2;
3199 if (result)
3200 ccw_device_set_timeout(cdev, 0);
3201 }
3202
3203 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3204
3205 if (result) {
3206 up(&irq_ptr->setting_up_sema);
3207 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
3208 return result;
3209 }
3210
3211 wait_event_interruptible_timeout(cdev->private->wait_q,
3212 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
3213 irq_ptr->state == QDIO_IRQ_STATE_ERR,
3214 QDIO_ESTABLISH_TIMEOUT);
3215
3216 if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
3217 result = 0;
3218 else {
3219 up(&irq_ptr->setting_up_sema);
3220 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3221 return -EIO;
3222 }
3223
3224 qdio_get_ssqd_siga(irq_ptr);
3225 /* if this gets set once, we're running under VM and can omit SVSes */
3226 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3227 omit_svs=1;
3228
3229 sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
3230 QDIO_DBF_TEXT2(0,setup,dbf_text);
3231
3232 sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
3233 QDIO_DBF_TEXT2(0,setup,dbf_text);
3234
3235 irq_ptr->hydra_gives_outbound_pcis=
3236 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
3237 irq_ptr->sync_done_on_outb_pcis=
3238 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
3239
3240 qdio_initialize_set_siga_flags_input(irq_ptr);
3241 qdio_initialize_set_siga_flags_output(irq_ptr);
3242
3243 up(&irq_ptr->setting_up_sema);
3244
3245 return result;
3246
3247}
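qdio_establish starts the establish CCW, retries once on failure and logs both return codes, with the second attempt's result winning. A minimal userspace sketch of that control flow (the start_io helper and its return values are fabricated; -16 stands in for -EBUSY):

#include <stdio.h>

/* hypothetical stand-in for ccw_device_start: first call fails */
static int start_io(int attempt) { return attempt == 0 ? -16 : 0; }

int main(void)
{
	int result = start_io(0), result2;

	if (result) {
		result2 = start_io(1);	/* one retry, as above */
		fprintf(stderr, "do_IO returned %i, next try returned %i\n",
			result, result2);
		result = result2;	/* the second result wins */
	}
	return result ? 1 : 0;
}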
3248
3249int
3250qdio_activate(struct ccw_device *cdev, int flags)
3251{
3252 struct qdio_irq *irq_ptr;
3253 int i,result=0,result2;
3254 unsigned long saveflags;
3255 char dbf_text[20]; /* see qdio_initialize */
3256
3257 irq_ptr = cdev->private->qdio_data;
3258 if (!irq_ptr)
3259 return -ENODEV;
3260
3261 if (cdev->private->state != DEV_STATE_ONLINE)
3262 return -EINVAL;
3263
3264 down(&irq_ptr->setting_up_sema);
3265 if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
3266 result=-EBUSY;
3267 goto out;
3268 }
3269
3270 sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
3271 QDIO_DBF_TEXT2(0,setup,dbf_text);
3272 QDIO_DBF_TEXT2(0,trace,dbf_text);
3273
3274 /* activate q */
3275 irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
3276 irq_ptr->ccw.flags=CCW_FLAG_SLI;
3277 irq_ptr->ccw.count=irq_ptr->aqueue.count;
3278 irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
3279
3280 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3281
3282 ccw_device_set_timeout(cdev, 0);
3283 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3284 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3285 0, DOIO_DENY_PREFETCH);
3286 if (result) {
3287 result2=ccw_device_start(cdev,&irq_ptr->ccw,
3288 QDIO_DOING_ACTIVATE,0,0);
3289 sprintf(dbf_text,"aq:io%4x",result);
3290 QDIO_DBF_TEXT2(1,setup,dbf_text);
3291 if (result2) {
3292			sprintf(dbf_text,"aq:io%4x",result2);
3293 QDIO_DBF_TEXT2(1,setup,dbf_text);
3294 }
3295 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
3296 "returned %i, next try returned %i\n",
3297 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3298 result, result2);
3299 result=result2;
3300 }
3301
3302 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3303 if (result)
3304 goto out;
3305
3306 for (i=0;i<irq_ptr->no_input_qs;i++) {
3307 if (irq_ptr->is_thinint_irq) {
3308 /*
3309			 * this ensures that, if we are interrupted by
3310			 * tiqdio_inbound_processing, qdio_unmark_q will
3311			 * not be called
3312 */
3313 qdio_reserve_q(irq_ptr->input_qs[i]);
3314 qdio_mark_tiq(irq_ptr->input_qs[i]);
3315 qdio_release_q(irq_ptr->input_qs[i]);
3316 }
3317 }
3318
3319 if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
3320 for (i=0;i<irq_ptr->no_input_qs;i++) {
3321 irq_ptr->input_qs[i]->is_input_q|=
3322 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
3323 }
3324 }
3325
3326 msleep(QDIO_ACTIVATE_TIMEOUT);
3327 switch (irq_ptr->state) {
3328 case QDIO_IRQ_STATE_STOPPED:
3329 case QDIO_IRQ_STATE_ERR:
3330 up(&irq_ptr->setting_up_sema);
3331 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3332 down(&irq_ptr->setting_up_sema);
3333 result = -EIO;
3334 break;
3335 default:
3336 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
3337 result = 0;
3338 }
3339 out:
3340 up(&irq_ptr->setting_up_sema);
3341
3342 return result;
3343}
3344
3345/* buffers filled forwards again to make Rick happy */
3346static void
3347qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3348 unsigned int count, struct qdio_buffer *buffers)
3349{
3350 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3351 int tmp = 0;
3352
3353 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3354 if (irq->is_qebsm) {
3355 while (count) {
3356 tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3357 if (!tmp)
3358 return;
3359 }
3360 return;
3361 }
3362 for (;;) {
3363 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3364 count--;
3365 if (!count) break;
3366 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3367 }
3368}
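Both fill helpers advance the buffer index with a power-of-two mask instead of a modulo. A minimal userspace sketch of that wrap arithmetic, assuming only that QDIO_MAX_BUFFERS_PER_Q is 128 as in the QDIO headers:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128	/* power of two, so '&' replaces '%' */

int main(void)
{
	unsigned int qidx = 126;
	int i;

	/* walk three slots across the wrap point 127 -> 0 */
	for (i = 0; i < 3; i++) {
		printf("slot %u\n", qidx);
		qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
	}
	return 0;	/* prints 126, 127, 0 */
}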
3369
3370static void
3371qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3372 unsigned int count, struct qdio_buffer *buffers)
3373{
3374 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3375 int tmp = 0;
3376
3377 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3378 if (irq->is_qebsm) {
3379 while (count) {
3380 tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3381 if (!tmp)
3382 return;
3383 }
3384 return;
3385 }
3386
3387 for (;;) {
3388 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3389 count--;
3390 if (!count) break;
3391 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3392 }
3393}
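The SLSB_CU_INPUT_EMPTY and SLSB_CU_OUTPUT_PRIMED values written by the fill helpers are composed from owner, type and state bits; the component values below are the ones defined in the new qdio.h later in this patch. A small check of the arithmetic:

#include <assert.h>
#include <stdio.h>

#define SLSB_OWNER_CU		0x40
#define SLSB_TYPE_INPUT		0x00
#define SLSB_TYPE_OUTPUT	0x20
#define SLSB_STATE_EMPTY	0x1
#define SLSB_STATE_PRIMED	0x2

int main(void)
{
	/* the two states the fill helpers hand over to the adapter */
	assert((SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) == 0x41);
	assert((SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) == 0x62);
	puts("SLSB encodings check out");
	return 0;
}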
3394
3395static void
3396do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3397 unsigned int qidx, unsigned int count,
3398 struct qdio_buffer *buffers)
3399{
3400 int used_elements;
3401
3402 /* This is the inbound handling of queues */
3403 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3404
3405 qdio_do_qdio_fill_input(q,qidx,count,buffers);
3406
3407 if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3408 (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3409 atomic_xchg(&q->polling,0);
3410
3411 if (used_elements)
3412 return;
3413 if (callflags&QDIO_FLAG_DONT_SIGA)
3414 return;
3415 if (q->siga_in) {
3416 int result;
3417
3418 result=qdio_siga_input(q);
3419 if (result) {
3420 if (q->siga_error)
3421 q->error_status_flags|=
3422 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
3423 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
3424 q->siga_error=result;
3425 }
3426 }
3427
3428 qdio_mark_q(q);
3429}
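The inbound handler only issues a SIGA when the queue was empty before this call (used_elements == 0) and the caller did not suppress it. A sketch of that decision, extracted into a pure function; the flag value is chosen for illustration only and is not the real QDIO_FLAG_DONT_SIGA encoding:

#include <stdio.h>

#define QDIO_FLAG_DONT_SIGA 0x10	/* illustrative value only */

/* returns 1 if a SIGA-input should be issued, mirroring the logic above */
static int should_siga_input(int used_before, unsigned int callflags,
			     int siga_in_required)
{
	if (used_before)		/* queue not empty: adapter already active */
		return 0;
	if (callflags & QDIO_FLAG_DONT_SIGA)
		return 0;
	return siga_in_required;	/* q->siga_in, per the SSQD data */
}

int main(void)
{
	printf("%d\n", should_siga_input(0, 0, 1));	/* 1: kick the adapter */
	printf("%d\n", should_siga_input(5, 0, 1));	/* 0: already running */
	return 0;
}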
3430
3431static void
3432do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3433 unsigned int qidx, unsigned int count,
3434 struct qdio_buffer *buffers)
3435{
3436 int used_elements;
3437 unsigned int cnt, start_buf;
3438 unsigned char state = 0;
3439 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3440
3441 /* This is the outbound handling of queues */
3442 qdio_do_qdio_fill_output(q,qidx,count,buffers);
3443
3444 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3445
3446 if (callflags&QDIO_FLAG_DONT_SIGA) {
3447 qdio_perf_stat_inc(&perf_stats.outbound_cnt);
3448 return;
3449 }
3450 if (callflags & QDIO_FLAG_PCI_OUT)
3451 q->is_pci_out = 1;
3452 else
3453 q->is_pci_out = 0;
3454 if (q->is_iqdio_q) {
3455 /* one siga for every sbal */
3456 while (count--)
3457 qdio_kick_outbound_q(q);
3458
3459 __qdio_outbound_processing(q);
3460 } else {
3461 /* under VM, we do a SIGA sync unconditionally */
3462 SYNC_MEMORY;
3463 else {
3464			/*
3465			 * without shadow queues we try to fast-requeue
3466			 * buffers; this is the else branch of the if
3467			 * statement hidden inside the SYNC_MEMORY macro
3468			 */
3469 if (irq->is_qebsm) {
3470 cnt = 1;
3471 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
3472 (QDIO_MAX_BUFFERS_PER_Q-1));
3473 qdio_do_eqbs(q, &state, &start_buf, &cnt);
3474 } else
3475 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3476 &(QDIO_MAX_BUFFERS_PER_Q-1) ];
3477 if (state != SLSB_CU_OUTPUT_PRIMED) {
3478 qdio_kick_outbound_q(q);
3479 } else {
3480 QDIO_DBF_TEXT3(0,trace, "fast-req");
3481 qdio_perf_stat_inc(&perf_stats.fast_reqs);
3482 }
3483 }
3484		/*
3485		 * merely marking the q could take too long:
3486		 * the upper layer module could generate a lot
3487		 * of traffic in that time
3488		 */
3489 __qdio_outbound_processing(q);
3490 }
3491
3492 qdio_perf_stat_inc(&perf_stats.outbound_cnt);
3493}
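The bare "else" after SYNC_MEMORY above is legal only because the macro expands to an if-statement without a trailing semicolon, so the else binds to it. A minimal demonstration of the same shape, with stand-in names for q->siga_sync and qdio_siga_sync_q:

#include <stdio.h>

static int q_siga_sync;				/* stands in for q->siga_sync */
static void sync_q(void) { puts("siga-sync"); }

/* same shape as SYNC_MEMORY in qdio.h: an if with no trailing ';' */
#define SYNC_MEMORY if (q_siga_sync) sync_q()

int main(void)
{
	q_siga_sync = 0;
	SYNC_MEMORY;		/* expands to: if (q_siga_sync) sync_q(); */
	else			/* binds to the if inside the macro */
		puts("fast-requeue path");
	return 0;
}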
3494
3495/* count must be 1 in iqdio */
3496int
3497do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3498 unsigned int queue_number, unsigned int qidx,
3499 unsigned int count,struct qdio_buffer *buffers)
3500{
3501 struct qdio_irq *irq_ptr;
3502#ifdef CONFIG_QDIO_DEBUG
3503 char dbf_text[20];
3504
3505 sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
3506 QDIO_DBF_TEXT3(0,trace,dbf_text);
3507#endif /* CONFIG_QDIO_DEBUG */
3508
3509	if ((qidx >= QDIO_MAX_BUFFERS_PER_Q) ||
3510	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
3511	    (queue_number >= QDIO_MAX_QUEUES_PER_IRQ))
3512 return -EINVAL;
3513
3514 if (count==0)
3515 return 0;
3516
3517 irq_ptr = cdev->private->qdio_data;
3518 if (!irq_ptr)
3519 return -ENODEV;
3520
3521#ifdef CONFIG_QDIO_DEBUG
3522 if (callflags&QDIO_FLAG_SYNC_INPUT)
3523 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
3524 sizeof(void*));
3525 else
3526 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
3527 sizeof(void*));
3528 sprintf(dbf_text,"flag%04x",callflags);
3529 QDIO_DBF_TEXT3(0,trace,dbf_text);
3530 sprintf(dbf_text,"qi%02xct%02x",qidx,count);
3531 QDIO_DBF_TEXT3(0,trace,dbf_text);
3532#endif /* CONFIG_QDIO_DEBUG */
3533
3534 if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
3535 return -EBUSY;
3536
3537 if (callflags&QDIO_FLAG_SYNC_INPUT)
3538 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
3539 callflags, qidx, count, buffers);
3540 else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
3541 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
3542 callflags, qidx, count, buffers);
3543 else {
3544 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
3545 return -EINVAL;
3546 }
3547 return 0;
3548}
3549
3550static int
3551qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3552 int buffer_length, int *eof, void *data)
3553{
3554 int c=0;
3555
3556	/* we are always called with buffer_length=4k, so we
3557	   deliver everything on the first read */
3558 if (offset>0)
3559 return 0;
3560
3561#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
3562#ifdef CONFIG_64BIT
3563 _OUTP_IT("Number of tasklet runs (total) : %li\n",
3564 (long)atomic64_read(&perf_stats.tl_runs));
3565 _OUTP_IT("Inbound tasklet runs tried/retried : %li/%li\n",
3566 (long)atomic64_read(&perf_stats.inbound_tl_runs),
3567 (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
3568 _OUTP_IT("Inbound-thin tasklet runs tried/retried : %li/%li\n",
3569 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
3570 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
3571 _OUTP_IT("Outbound tasklet runs tried/retried : %li/%li\n",
3572 (long)atomic64_read(&perf_stats.outbound_tl_runs),
3573 (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));
3574 _OUTP_IT("\n");
3575 _OUTP_IT("Number of SIGA sync's issued : %li\n",
3576 (long)atomic64_read(&perf_stats.siga_syncs));
3577 _OUTP_IT("Number of SIGA in's issued : %li\n",
3578 (long)atomic64_read(&perf_stats.siga_ins));
3579 _OUTP_IT("Number of SIGA out's issued : %li\n",
3580 (long)atomic64_read(&perf_stats.siga_outs));
3581 _OUTP_IT("Number of PCIs caught : %li\n",
3582 (long)atomic64_read(&perf_stats.pcis));
3583 _OUTP_IT("Number of adapter interrupts caught : %li\n",
3584 (long)atomic64_read(&perf_stats.thinints));
3585 _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %li\n",
3586 (long)atomic64_read(&perf_stats.fast_reqs));
3587 _OUTP_IT("\n");
3588 _OUTP_IT("Number of inbound transfers : %li\n",
3589 (long)atomic64_read(&perf_stats.inbound_cnt));
3590 _OUTP_IT("Number of do_QDIOs outbound : %li\n",
3591 (long)atomic64_read(&perf_stats.outbound_cnt));
3592#else /* CONFIG_64BIT */
3593 _OUTP_IT("Number of tasklet runs (total) : %i\n",
3594 atomic_read(&perf_stats.tl_runs));
3595 _OUTP_IT("Inbound tasklet runs tried/retried : %i/%i\n",
3596 atomic_read(&perf_stats.inbound_tl_runs),
3597 atomic_read(&perf_stats.inbound_tl_runs_resched));
3598 _OUTP_IT("Inbound-thin tasklet runs tried/retried : %i/%i\n",
3599 atomic_read(&perf_stats.inbound_thin_tl_runs),
3600 atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
3601 _OUTP_IT("Outbound tasklet runs tried/retried : %i/%i\n",
3602 atomic_read(&perf_stats.outbound_tl_runs),
3603 atomic_read(&perf_stats.outbound_tl_runs_resched));
3604 _OUTP_IT("\n");
3605 _OUTP_IT("Number of SIGA sync's issued : %i\n",
3606 atomic_read(&perf_stats.siga_syncs));
3607 _OUTP_IT("Number of SIGA in's issued : %i\n",
3608 atomic_read(&perf_stats.siga_ins));
3609 _OUTP_IT("Number of SIGA out's issued : %i\n",
3610 atomic_read(&perf_stats.siga_outs));
3611 _OUTP_IT("Number of PCIs caught : %i\n",
3612 atomic_read(&perf_stats.pcis));
3613 _OUTP_IT("Number of adapter interrupts caught : %i\n",
3614 atomic_read(&perf_stats.thinints));
3615 _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %i\n",
3616 atomic_read(&perf_stats.fast_reqs));
3617 _OUTP_IT("\n");
3618 _OUTP_IT("Number of inbound transfers : %i\n",
3619 atomic_read(&perf_stats.inbound_cnt));
3620 _OUTP_IT("Number of do_QDIOs outbound : %i\n",
3621 atomic_read(&perf_stats.outbound_cnt));
3622#endif /* CONFIG_64BIT */
3623 _OUTP_IT("\n");
3624
3625 return c;
3626}
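The _OUTP_IT macro accumulates formatted lines and a running offset in the single proc buffer, and the handler returns the byte count. A minimal userspace sketch of the same pattern, using a standard variadic macro in place of the gcc-style one above:

#include <stdio.h>

int main(void)
{
	char buffer[256];
	int c = 0;

/* mirrors _OUTP_IT: append at the current offset, advance the offset */
#define OUTP_IT(...) (c += sprintf(buffer + c, __VA_ARGS__))

	OUTP_IT("Number of SIGA out's issued : %i\n", 42);
	OUTP_IT("Number of PCIs caught       : %i\n", 7);
	fputs(buffer, stdout);
	printf("%d bytes\n", c);	/* the proc handler returns this count */
	return 0;
}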
3627
3628static struct proc_dir_entry *qdio_perf_proc_file;
3629
3630static void
3631qdio_add_procfs_entry(void)
3632{
3633 proc_perf_file_registration=0;
3634 qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3635 S_IFREG|0444,NULL);
3636 if (qdio_perf_proc_file) {
3637 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
3638 } else proc_perf_file_registration=-1;
3639
3640 if (proc_perf_file_registration)
3641		QDIO_PRINT_WARN("unable to register the performance " \
3642				"proc file (%i).\n",
3643				proc_perf_file_registration);
3644}
3645
3646static void
3647qdio_remove_procfs_entry(void)
3648{
3649	if (!proc_perf_file_registration) /* i.e. registration succeeded earlier */
3650 remove_proc_entry(QDIO_PERF,NULL);
3651}
3652
3653/**
3654 * attributes in sysfs
3655 *****************************************************************************/
3656
3657static ssize_t
3658qdio_performance_stats_show(struct bus_type *bus, char *buf)
3659{
3660 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
3661}
3662
3663static ssize_t
3664qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
3665{
3666 unsigned long i;
3667 int ret;
3668
3669 ret = strict_strtoul(buf, 16, &i);
3670 if (!ret && ((i == 0) || (i == 1))) {
3671 if (i == qdio_performance_stats)
3672 return count;
3673 qdio_performance_stats = i;
3674 if (i==0) {
3675 /* reset perf. stat. info */
3676#ifdef CONFIG_64BIT
3677 atomic64_set(&perf_stats.tl_runs, 0);
3678 atomic64_set(&perf_stats.outbound_tl_runs, 0);
3679 atomic64_set(&perf_stats.inbound_tl_runs, 0);
3680 atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
3681 atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
3682 atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
3683 0);
3684 atomic64_set(&perf_stats.siga_outs, 0);
3685 atomic64_set(&perf_stats.siga_ins, 0);
3686 atomic64_set(&perf_stats.siga_syncs, 0);
3687 atomic64_set(&perf_stats.pcis, 0);
3688 atomic64_set(&perf_stats.thinints, 0);
3689 atomic64_set(&perf_stats.fast_reqs, 0);
3690 atomic64_set(&perf_stats.outbound_cnt, 0);
3691 atomic64_set(&perf_stats.inbound_cnt, 0);
3692#else /* CONFIG_64BIT */
3693 atomic_set(&perf_stats.tl_runs, 0);
3694 atomic_set(&perf_stats.outbound_tl_runs, 0);
3695 atomic_set(&perf_stats.inbound_tl_runs, 0);
3696 atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
3697 atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
3698 atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
3699 atomic_set(&perf_stats.siga_outs, 0);
3700 atomic_set(&perf_stats.siga_ins, 0);
3701 atomic_set(&perf_stats.siga_syncs, 0);
3702 atomic_set(&perf_stats.pcis, 0);
3703 atomic_set(&perf_stats.thinints, 0);
3704 atomic_set(&perf_stats.fast_reqs, 0);
3705 atomic_set(&perf_stats.outbound_cnt, 0);
3706 atomic_set(&perf_stats.inbound_cnt, 0);
3707#endif /* CONFIG_64BIT */
3708 }
3709 } else {
3710 QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
3711 return -EINVAL;
3712 }
3713 return count;
3714}
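The store accepts exactly "0" or "1" and, curiously, parses with base 16. A userspace sketch of that validation, with strtoul standing in for the kernel's strict_strtoul:

#include <stdio.h>
#include <stdlib.h>

/* returns 0 if buf parses to exactly 0 or 1, else -1 (maps to -EINVAL) */
static int parse_flag(const char *buf, unsigned long *val)
{
	char *end;

	*val = strtoul(buf, &end, 16);	/* base 16, as in the store above */
	if (end == buf || (*val != 0 && *val != 1))
		return -1;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("%d\n", parse_flag("1", &v));	/*  0: accepted */
	printf("%d\n", parse_flag("2", &v));	/* -1: rejected */
	return 0;
}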
3715
3716static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
3717 qdio_performance_stats_store);
3718
3719static void
3720tiqdio_register_thinints(void)
3721{
3722 char dbf_text[20];
3723
3724 tiqdio_ind =
3725 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL);
3726 if (IS_ERR(tiqdio_ind)) {
3727 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3728 QDIO_DBF_TEXT0(0,setup,dbf_text);
3729 QDIO_PRINT_ERR("failed to register adapter handler " \
3730 "(rc=%li).\nAdapter interrupts might " \
3731 "not work. Continuing.\n",
3732 PTR_ERR(tiqdio_ind));
3733 tiqdio_ind = NULL;
3734 }
3735}
3736
3737static void
3738tiqdio_unregister_thinints(void)
3739{
3740 if (tiqdio_ind)
3741 s390_unregister_adapter_interrupt(tiqdio_ind);
3742}
3743
3744static int
3745qdio_get_qdio_memory(void)
3746{
3747 int i;
3748 indicator_used[0]=1;
3749
3750 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3751 indicator_used[i]=0;
3752 indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3753 GFP_KERNEL);
3754 if (!indicators)
3755 return -ENOMEM;
3756 return 0;
3757}
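The indicator array is sized to fill exactly one cache line, with slot 0 reserved. A small check of that sizing, assuming the L2_CACHELINE_SIZE of 256 defined in qdio.h and 4-byte indicators (sizeof(__u32) in the original):

#include <stdio.h>

#define L2_CACHELINE_SIZE 256	/* from qdio.h */
#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE / sizeof(unsigned int))

int main(void)
{
	/* one cache line of 4-byte indicators; slot 0 is reserved
	 * (indicator_used[0] = 1 in qdio_get_qdio_memory) */
	printf("%zu indicators per cache line\n", INDICATORS_PER_CACHELINE);
	return 0;	/* prints 64 */
}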
3758
3759static void
3760qdio_release_qdio_memory(void)
3761{
3762 kfree(indicators);
3763}
3764
3765static void
3766qdio_unregister_dbf_views(void)
3767{
3768 if (qdio_dbf_setup)
3769 debug_unregister(qdio_dbf_setup);
3770 if (qdio_dbf_sbal)
3771 debug_unregister(qdio_dbf_sbal);
3772 if (qdio_dbf_sense)
3773 debug_unregister(qdio_dbf_sense);
3774 if (qdio_dbf_trace)
3775 debug_unregister(qdio_dbf_trace);
3776#ifdef CONFIG_QDIO_DEBUG
3777 if (qdio_dbf_slsb_out)
3778 debug_unregister(qdio_dbf_slsb_out);
3779 if (qdio_dbf_slsb_in)
3780 debug_unregister(qdio_dbf_slsb_in);
3781#endif /* CONFIG_QDIO_DEBUG */
3782}
3783
3784static int
3785qdio_register_dbf_views(void)
3786{
3787 qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
3788 QDIO_DBF_SETUP_PAGES,
3789 QDIO_DBF_SETUP_NR_AREAS,
3790 QDIO_DBF_SETUP_LEN);
3791 if (!qdio_dbf_setup)
3792 goto oom;
3793 debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
3794 debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
3795
3796 qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
3797 QDIO_DBF_SBAL_PAGES,
3798 QDIO_DBF_SBAL_NR_AREAS,
3799 QDIO_DBF_SBAL_LEN);
3800 if (!qdio_dbf_sbal)
3801 goto oom;
3802
3803 debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
3804 debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
3805
3806 qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
3807 QDIO_DBF_SENSE_PAGES,
3808 QDIO_DBF_SENSE_NR_AREAS,
3809 QDIO_DBF_SENSE_LEN);
3810 if (!qdio_dbf_sense)
3811 goto oom;
3812
3813 debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
3814 debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
3815
3816 qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
3817 QDIO_DBF_TRACE_PAGES,
3818 QDIO_DBF_TRACE_NR_AREAS,
3819 QDIO_DBF_TRACE_LEN);
3820 if (!qdio_dbf_trace)
3821 goto oom;
3822
3823 debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
3824 debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
3825
3826#ifdef CONFIG_QDIO_DEBUG
3827 qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
3828 QDIO_DBF_SLSB_OUT_PAGES,
3829 QDIO_DBF_SLSB_OUT_NR_AREAS,
3830 QDIO_DBF_SLSB_OUT_LEN);
3831 if (!qdio_dbf_slsb_out)
3832 goto oom;
3833 debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
3834 debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
3835
3836 qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
3837 QDIO_DBF_SLSB_IN_PAGES,
3838 QDIO_DBF_SLSB_IN_NR_AREAS,
3839 QDIO_DBF_SLSB_IN_LEN);
3840 if (!qdio_dbf_slsb_in)
3841 goto oom;
3842 debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
3843 debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
3844#endif /* CONFIG_QDIO_DEBUG */
3845 return 0;
3846oom:
3847 QDIO_PRINT_ERR("not enough memory for dbf.\n");
3848 qdio_unregister_dbf_views();
3849 return -ENOMEM;
3850}
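On any allocation failure the function unwinds through a single oom label that releases everything acquired so far; qdio_unregister_dbf_views tolerates partially completed setup by checking each pointer. A minimal userspace sketch of the same register-or-unwind idiom, with malloc standing in for debug_register:

#include <stdio.h>
#include <stdlib.h>

static void *a, *b;	/* stand-ins for the dbf view pointers */

static int register_views(void)
{
	a = malloc(32);
	if (!a)
		goto oom;
	b = malloc(32);
	if (!b)
		goto oom;
	return 0;
oom:
	free(b);	/* free(NULL) is a no-op, so partial setup is safe */
	free(a);
	return -1;
}

int main(void)
{
	return register_views() ? EXIT_FAILURE : EXIT_SUCCESS;
}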
3851
3852static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
3853{
3854 return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
3855}
3856
3857static void qdio_mempool_free(void *element, void *size)
3858{
3859 free_page((unsigned long) element);
3860}
3861
3862static int __init
3863init_QDIO(void)
3864{
3865 int res;
3866 void *ptr;
3867
3868	printk(KERN_INFO "qdio: loading %s\n", version);
3869
3870 res=qdio_get_qdio_memory();
3871 if (res)
3872 return res;
3873
3874 qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
3875 256, 0, NULL);
3876 if (!qdio_q_cache) {
3877 qdio_release_qdio_memory();
3878 return -ENOMEM;
3879 }
3880
3881 res = qdio_register_dbf_views();
3882 if (res) {
3883 kmem_cache_destroy(qdio_q_cache);
3884 qdio_release_qdio_memory();
3885 return res;
3886 }
3887
3888 QDIO_DBF_TEXT0(0,setup,"initQDIO");
3889 res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3890
3891 memset((void*)&perf_stats,0,sizeof(perf_stats));
3892 QDIO_DBF_TEXT0(0,setup,"perfstat");
3893 ptr=&perf_stats;
3894 QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3895
3896 qdio_add_procfs_entry();
3897
3898 qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
3899 qdio_mempool_alloc,
3900 qdio_mempool_free, NULL);
3901
3902 if (tiqdio_check_chsc_availability())
3903 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3904
3905 tiqdio_register_thinints();
3906
3907 return 0;
3908}
3909
3910static void __exit
3911cleanup_QDIO(void)
3912{
3913 tiqdio_unregister_thinints();
3914 qdio_remove_procfs_entry();
3915 qdio_release_qdio_memory();
3916 qdio_unregister_dbf_views();
3917 mempool_destroy(qdio_mempool_scssc);
3918 kmem_cache_destroy(qdio_q_cache);
3919 bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3920	printk(KERN_INFO "qdio: %s: module removed\n", version);
3921}
3922
3923module_init(init_QDIO);
3924module_exit(cleanup_QDIO);
3925
3926EXPORT_SYMBOL(qdio_allocate);
3927EXPORT_SYMBOL(qdio_establish);
3928EXPORT_SYMBOL(qdio_initialize);
3929EXPORT_SYMBOL(qdio_activate);
3930EXPORT_SYMBOL(do_QDIO);
3931EXPORT_SYMBOL(qdio_shutdown);
3932EXPORT_SYMBOL(qdio_free);
3933EXPORT_SYMBOL(qdio_cleanup);
3934EXPORT_SYMBOL(qdio_synchronize);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index c3df6b2c38b7..c1a70985abfa 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,66 +1,20 @@
+/*
+ * linux/drivers/s390/cio/qdio.h
+ *
+ * Copyright 2000,2008 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ *	      Jan Glauber <jang@linux.vnet.ibm.com>
+ */
 #ifndef _CIO_QDIO_H
 #define _CIO_QDIO_H
 
 #include <asm/page.h>
+#include <asm/schid.h>
+#include "chsc.h"
 
-#include "schid.h"
-
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_VERBOSE_LEVEL 9
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_VERBOSE_LEVEL 5
-#endif /* CONFIG_QDIO_DEBUG */
-#define QDIO_USE_PROCESSING_STATE
-
-#define QDIO_MINIMAL_BH_RELIEF_TIME 16
-#define QDIO_TIMER_POLL_VALUE 1
-#define IQDIO_TIMER_POLL_VALUE 1
-
-/*
- * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as
- * we never know, whether we'll get initiative again, e.g. to give the
- * transmit skb's back to the stack, however the stack may be waiting for
- * them... therefore we define 4 as threshold to start polling (which
- * will stop as soon as the asynchronous queue catches up)
- * btw, this only applies to the asynchronous HiperSockets queue
- */
-#define IQDIO_FILL_LEVEL_TO_POLL 4
-
-#define TIQDIO_THININT_ISC 3
-#define TIQDIO_DELAY_TARGET 0
-#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
-#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
-#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
-#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */
-#define IQDIO_LOCAL_LAPS 4
-#define IQDIO_LOCAL_LAPS_INT 1
-#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
-/*#define IQDIO_IQDC_INT_PARM 0x1234*/
-
-#define QDIO_Q_LAPS 5
-
-#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY
-
-#define L2_CACHELINE_SIZE 256
-#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
-
-#define QDIO_PERF "qdio_perf"
-
-/* must be a power of 2 */
-/*#define QDIO_STATS_NUMBER 4
-
-#define QDIO_STATS_CLASSES 2
-#define QDIO_STATS_COUNT_NEEDED 2*/
-
-#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
-					    exiting without having use_count
-					    of the queue to 0 */
-
-#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
-#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
-#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
-#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
-#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */
+#define QDIO_BUSY_BIT_PATIENCE		100	/* 100 microseconds */
+#define QDIO_BUSY_BIT_GIVE_UP		2000000	/* 2 seconds = eternity */
+#define QDIO_INPUT_THRESHOLD		500	/* 500 microseconds */
 
 enum qdio_irq_states {
 	QDIO_IRQ_STATE_INACTIVE,
@@ -72,565 +26,352 @@ enum qdio_irq_states {
 	NR_QDIO_IRQ_STATES,
 };
 
-/* used as intparm in do_IO: */
-#define QDIO_DOING_SENSEID 0
+/* used as intparm in do_IO */
 #define QDIO_DOING_ESTABLISH 1
 #define QDIO_DOING_ACTIVATE 2
 #define QDIO_DOING_CLEANUP 3
 
+#define SLSB_STATE_NOT_INIT	0x0
+#define SLSB_STATE_EMPTY	0x1
+#define SLSB_STATE_PRIMED	0x2
+#define SLSB_STATE_HALTED	0xe
+#define SLSB_STATE_ERROR	0xf
+#define SLSB_TYPE_INPUT		0x0
+#define SLSB_TYPE_OUTPUT	0x20
+#define SLSB_OWNER_PROG		0x80
+#define SLSB_OWNER_CU		0x40
+
+#define SLSB_P_INPUT_NOT_INIT	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT)  /* 0x80 */
+#define SLSB_P_INPUT_ACK	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	    /* 0x81 */
+#define SLSB_CU_INPUT_EMPTY	\
+	(SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	    /* 0x41 */
+#define SLSB_P_INPUT_PRIMED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED)    /* 0x82 */
+#define SLSB_P_INPUT_HALTED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED)    /* 0x8e */
+#define SLSB_P_INPUT_ERROR	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR)	    /* 0x8f */
+#define SLSB_P_OUTPUT_NOT_INIT	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
+#define SLSB_P_OUTPUT_EMPTY	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY)    /* 0xa1 */
+#define SLSB_CU_OUTPUT_PRIMED	\
+	(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)	    /* 0x62 */
+#define SLSB_P_OUTPUT_HALTED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED)   /* 0xae */
+#define SLSB_P_OUTPUT_ERROR	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR)    /* 0xaf */
+
+#define SLSB_ERROR_DURING_LOOKUP	0xff
+
+/* additional CIWs returned by extended Sense-ID */
+#define CIW_TYPE_EQUEUE			0x3 /* establish QDIO queues */
+#define CIW_TYPE_AQUEUE			0x4 /* activate QDIO queues */
-
-/************************* DEBUG FACILITY STUFF *********************/
-
-#define QDIO_DBF_HEX(ex,name,level,addr,len) \
-	do { \
-	if (ex) \
-		debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
-	else \
-		debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
-	} while (0)
-#define QDIO_DBF_TEXT(ex,name,level,text) \
-	do { \
-	if (ex) \
-		debug_text_exception(qdio_dbf_##name,level,text); \
-	else \
-		debug_text_event(qdio_dbf_##name,level,text); \
-	} while (0)
-
-
-#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
-#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
-#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
-#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
-#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
-#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
-#else /* CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
-#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
-#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
-#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
-
-#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
-#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
-#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
-#ifdef CONFIG_QDIO_DEBUG
128
129#define QDIO_DBF_SETUP_NAME "qdio_setup"
130#define QDIO_DBF_SETUP_LEN 8
131#define QDIO_DBF_SETUP_PAGES 4
132#define QDIO_DBF_SETUP_NR_AREAS 1
133#ifdef CONFIG_QDIO_DEBUG
134#define QDIO_DBF_SETUP_LEVEL 6
135#else /* CONFIG_QDIO_DEBUG */
136#define QDIO_DBF_SETUP_LEVEL 2
137#endif /* CONFIG_QDIO_DEBUG */
138
139#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
140#define QDIO_DBF_SBAL_LEN 256
141#define QDIO_DBF_SBAL_PAGES 4
142#define QDIO_DBF_SBAL_NR_AREAS 2
143#ifdef CONFIG_QDIO_DEBUG
144#define QDIO_DBF_SBAL_LEVEL 6
145#else /* CONFIG_QDIO_DEBUG */
146#define QDIO_DBF_SBAL_LEVEL 2
147#endif /* CONFIG_QDIO_DEBUG */
148
149#define QDIO_DBF_TRACE_NAME "qdio_trace"
150#define QDIO_DBF_TRACE_LEN 8
151#define QDIO_DBF_TRACE_NR_AREAS 2
152#ifdef CONFIG_QDIO_DEBUG
153#define QDIO_DBF_TRACE_PAGES 16
154#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
155#else /* CONFIG_QDIO_DEBUG */
156#define QDIO_DBF_TRACE_PAGES 4
157#define QDIO_DBF_TRACE_LEVEL 2
158#endif /* CONFIG_QDIO_DEBUG */
159
160#define QDIO_DBF_SENSE_NAME "qdio_sense"
161#define QDIO_DBF_SENSE_LEN 64
162#define QDIO_DBF_SENSE_PAGES 2
163#define QDIO_DBF_SENSE_NR_AREAS 1
164#ifdef CONFIG_QDIO_DEBUG
165#define QDIO_DBF_SENSE_LEVEL 6
166#else /* CONFIG_QDIO_DEBUG */
167#define QDIO_DBF_SENSE_LEVEL 2
168#endif /* CONFIG_QDIO_DEBUG */
169
170#ifdef CONFIG_QDIO_DEBUG
171#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
172
173#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
174#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
175#define QDIO_DBF_SLSB_OUT_PAGES 256
176#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
177#define QDIO_DBF_SLSB_OUT_LEVEL 6
178
179#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
180#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
181#define QDIO_DBF_SLSB_IN_PAGES 256
182#define QDIO_DBF_SLSB_IN_NR_AREAS 1
183#define QDIO_DBF_SLSB_IN_LEVEL 6
184#endif /* CONFIG_QDIO_DEBUG */
185
186#define QDIO_PRINTK_HEADER QDIO_NAME ": "
187
188#if QDIO_VERBOSE_LEVEL>8
189#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
190#else
191#define QDIO_PRINT_STUPID(x...) do { } while (0)
192#endif
193
194#if QDIO_VERBOSE_LEVEL>7
195#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
196#else
197#define QDIO_PRINT_ALL(x...) do { } while (0)
198#endif
199
200#if QDIO_VERBOSE_LEVEL>6
201#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
202#else
203#define QDIO_PRINT_INFO(x...) do { } while (0)
204#endif
205
206#if QDIO_VERBOSE_LEVEL>5
207#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
208#else
209#define QDIO_PRINT_WARN(x...) do { } while (0)
210#endif
211
212#if QDIO_VERBOSE_LEVEL>4
213#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
214#else
215#define QDIO_PRINT_ERR(x...) do { } while (0)
216#endif
217
218#if QDIO_VERBOSE_LEVEL>3
219#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
220#else
221#define QDIO_PRINT_CRIT(x...) do { } while (0)
222#endif
223 72
-#if QDIO_VERBOSE_LEVEL>2
-#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_ALERT(x...) do { } while (0)
-#endif
-
-#if QDIO_VERBOSE_LEVEL>1
-#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
-#else
-#define QDIO_PRINT_EMERG(x...) do { } while (0)
-#endif
-
-#define QDIO_HEXDUMP16(importance,header,ptr) \
-QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
-		   "%02x %02x %02x %02x %02x %02x %02x %02x " \
-		   "%02x %02x %02x %02x\n",*(((char*)ptr)), \
-		   *(((char*)ptr)+1),*(((char*)ptr)+2), \
-		   *(((char*)ptr)+3),*(((char*)ptr)+4), \
-		   *(((char*)ptr)+5),*(((char*)ptr)+6), \
-		   *(((char*)ptr)+7),*(((char*)ptr)+8), \
-		   *(((char*)ptr)+9),*(((char*)ptr)+10), \
-		   *(((char*)ptr)+11),*(((char*)ptr)+12), \
-		   *(((char*)ptr)+13),*(((char*)ptr)+14), \
-		   *(((char*)ptr)+15)); \
-QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
-		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
-		   *(((char*)ptr)+16),*(((char*)ptr)+17), \
-		   *(((char*)ptr)+18),*(((char*)ptr)+19), \
-		   *(((char*)ptr)+20),*(((char*)ptr)+21), \
-		   *(((char*)ptr)+22),*(((char*)ptr)+23), \
-		   *(((char*)ptr)+24),*(((char*)ptr)+25), \
-		   *(((char*)ptr)+26),*(((char*)ptr)+27), \
-		   *(((char*)ptr)+28),*(((char*)ptr)+29), \
-		   *(((char*)ptr)+30),*(((char*)ptr)+31));
-
-/****************** END OF DEBUG FACILITY STUFF *********************/
-
-/*
- * Some instructions as assembly
- */
-
-static inline int
-do_sqbs(unsigned long sch, unsigned char state, int queue,
-	unsigned int *start, unsigned int *count)
-{
-#ifdef CONFIG_64BIT
-	register unsigned long _ccq asm ("0") = *count;
-	register unsigned long _sch asm ("1") = sch;
-	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
-
-	asm volatile(
-		"	.insn	rsy,0xeb000000008A,%1,0,0(%2)"
-		: "+d" (_ccq), "+d" (_queuestart)
-		: "d" ((unsigned long)state), "d" (_sch)
-		: "memory", "cc");
-	*count = _ccq & 0xff;
-	*start = _queuestart & 0xff;
-
-	return (_ccq >> 32) & 0xff;
-#else
-	return 0;
-#endif
-}
-
-static inline int
-do_eqbs(unsigned long sch, unsigned char *state, int queue,
-	unsigned int *start, unsigned int *count)
-{
-#ifdef CONFIG_64BIT
-	register unsigned long _ccq asm ("0") = *count;
-	register unsigned long _sch asm ("1") = sch;
-	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
-	unsigned long _state = 0;
-
-	asm volatile(
-		"	.insn	rrf,0xB99c0000,%1,%2,0,0"
-		: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
-		: "d" (_sch)
-		: "memory", "cc" );
-	*count = _ccq & 0xff;
-	*start = _queuestart & 0xff;
-	*state = _state & 0xff;
-
-	return (_ccq >> 32) & 0xff;
-#else
-	return 0;
-#endif
-}
+/* flags for st qdio sch data */
+#define CHSC_FLAG_QDIO_CAPABILITY	0x80
+#define CHSC_FLAG_VALIDITY		0x40
+
+/* qdio adapter-characteristics-1 flag */
+#define AC1_SIGA_INPUT_NEEDED		0x40	/* process input queues */
+#define AC1_SIGA_OUTPUT_NEEDED		0x20	/* process output queues */
+#define AC1_SIGA_SYNC_NEEDED		0x10	/* ask hypervisor to sync */
+#define AC1_AUTOMATIC_SYNC_ON_THININT	0x08	/* set by hypervisor */
+#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI	0x04	/* set by hypervisor */
+#define AC1_SC_QEBSM_AVAILABLE		0x02	/* available for subchannel */
+#define AC1_SC_QEBSM_ENABLED		0x01	/* enabled for subchannel */
+
+#ifdef CONFIG_64BIT
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+			  int *start, int *count)
+{
+	register unsigned long _ccq asm ("0") = *count;
+	register unsigned long _token asm ("1") = token;
+	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+
+	asm volatile(
+		"	.insn	rsy,0xeb000000008A,%1,0,0(%2)"
+		: "+d" (_ccq), "+d" (_queuestart)
+		: "d" ((unsigned long)state), "d" (_token)
+		: "memory", "cc");
+	*count = _ccq & 0xff;
+	*start = _queuestart & 0xff;
+
+	return (_ccq >> 32) & 0xff;
+}
+
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+			  int *start, int *count)
+{
+	register unsigned long _ccq asm ("0") = *count;
+	register unsigned long _token asm ("1") = token;
+	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+	unsigned long _state = 0;
+
+	asm volatile(
+		"	.insn	rrf,0xB99c0000,%1,%2,0,0"
+		: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
+		: "d" (_token)
+		: "memory", "cc");
+	*count = _ccq & 0xff;
+	*start = _queuestart & 0xff;
+	*state = _state & 0xff;
+
+	return (_ccq >> 32) & 0xff;
+}
+#else
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+			  int *start, int *count) { return 0; }
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+			  int *start, int *count) { return 0; }
+#endif /* CONFIG_64BIT */
 
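do_sqbs and do_eqbs pack their three results into two registers: the condition/quality code sits in bits 32-39 of _ccq, the residual count in its low byte, and the next start index in the low byte of _queuestart. A userspace sketch of that unpacking, with fabricated raw register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ccq = ((uint64_t)0x20 << 32) | 0x05;	/* fabricated value */
	uint64_t queuestart = ((uint64_t)2 << 32) | 0x7f;

	printf("cc/cq: 0x%02x\n", (unsigned)((ccq >> 32) & 0xff));
	printf("count: %u\n", (unsigned)(ccq & 0xff));
	printf("start: %u\n", (unsigned)(queuestart & 0xff));
	return 0;
}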
131struct qdio_irq;
 
+struct siga_flag {
+	u8 input:1;
+	u8 output:1;
+	u8 sync:1;
+	u8 no_sync_ti:1;
+	u8 no_sync_out_ti:1;
+	u8 no_sync_out_pci:1;
+	u8:2;
+} __attribute__ ((packed));
-static inline int
-do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
-{
-	register unsigned long reg0 asm ("0") = 2;
-	register struct subchannel_id reg1 asm ("1") = schid;
-	register unsigned long reg2 asm ("2") = mask1;
-	register unsigned long reg3 asm ("3") = mask2;
-	int cc;
-
323 asm volatile(
324 " siga 0\n"
325 " ipm %0\n"
326 " srl %0,28\n"
327 : "=d" (cc)
328 : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
329 return cc;
330}
331
332static inline int
333do_siga_input(struct subchannel_id schid, unsigned int mask)
334{
335 register unsigned long reg0 asm ("0") = 1;
336 register struct subchannel_id reg1 asm ("1") = schid;
337 register unsigned long reg2 asm ("2") = mask;
338 int cc;
339
340 asm volatile(
341 " siga 0\n"
342 " ipm %0\n"
343 " srl %0,28\n"
344 : "=d" (cc)
345 : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
346 return cc;
347}
348
349static inline int
350do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
351 unsigned int fc)
352{
353 register unsigned long __fc asm("0") = fc;
354 register unsigned long __schid asm("1") = schid;
355 register unsigned long __mask asm("2") = mask;
356 int cc;
357
358 asm volatile(
359 " siga 0\n"
360 "0: ipm %0\n"
361 " srl %0,28\n"
362 "1:\n"
363 EX_TABLE(0b,1b)
364 : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
365 : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
366 : "cc", "memory");
367 (*bb) = ((unsigned int) __fc) >> 31;
368 return cc;
369}
370
371static inline unsigned long
372do_clear_global_summary(void)
373{
374 register unsigned long __fn asm("1") = 3;
375 register unsigned long __tmp asm("2");
376 register unsigned long __time asm("3");
377
378 asm volatile(
379 " .insn rre,0xb2650000,2,0"
380 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
381 return __time;
382}
383
384/*
385 * QDIO device commands returned by extended Sense-ID
386 */
387#define DEFAULT_ESTABLISH_QS_CMD 0x1b
388#define DEFAULT_ESTABLISH_QS_COUNT 0x1000
389#define DEFAULT_ACTIVATE_QS_CMD 0x1f
390#define DEFAULT_ACTIVATE_QS_COUNT 0
391
392/*
393 * additional CIWs returned by extended Sense-ID
394 */
395#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
396#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
397
398#define QDIO_CHSC_RESPONSE_CODE_OK 1
399/* flags for st qdio sch data */
400#define CHSC_FLAG_QDIO_CAPABILITY 0x80
401#define CHSC_FLAG_VALIDITY 0x40
402
403#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40
404#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20
405#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10
406#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
407#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
408 142
409struct qdio_chsc_ssqd { 143struct chsc_ssqd_area {
410 struct chsc_header request; 144 struct chsc_header request;
411 u16 reserved1:10; 145 u16:10;
412 u16 ssid:2; 146 u8 ssid:2;
413 u16 fmt:4; 147 u8 fmt:4;
414 u16 first_sch; 148 u16 first_sch;
415 u16 reserved2; 149 u16:16;
416 u16 last_sch; 150 u16 last_sch;
417 u32 reserved3; 151 u32:32;
418 struct chsc_header response; 152 struct chsc_header response;
419 u32 reserved4; 153 u32:32;
420 u8 flags; 154 struct qdio_ssqd_desc qdio_ssqd;
421 u8 reserved5; 155} __attribute__ ((packed));
422 u16 sch;
423 u8 qfmt;
424 u8 parm;
425 u8 qdioac1;
426 u8 sch_class;
427 u8 pct;
428 u8 icnt;
429 u8 reserved7;
430 u8 ocnt;
431 u8 reserved8;
432 u8 mbccnt;
433 u16 qdioac2;
434 u64 sch_token;
435};
436 156
437struct qdio_perf_stats { 157struct scssc_area {
438#ifdef CONFIG_64BIT 158 struct chsc_header request;
439 atomic64_t tl_runs; 159 u16 operation_code;
440 atomic64_t outbound_tl_runs; 160 u16:16;
441 atomic64_t outbound_tl_runs_resched; 161 u32:32;
442 atomic64_t inbound_tl_runs; 162 u32:32;
443 atomic64_t inbound_tl_runs_resched; 163 u64 summary_indicator_addr;
444 atomic64_t inbound_thin_tl_runs; 164 u64 subchannel_indicator_addr;
445 atomic64_t inbound_thin_tl_runs_resched; 165 u32 ks:4;
446 166 u32 kc:4;
447 atomic64_t siga_outs; 167 u32:21;
448 atomic64_t siga_ins; 168 u32 isc:3;
449 atomic64_t siga_syncs; 169 u32 word_with_d_bit;
450 atomic64_t pcis; 170 u32:32;
451 atomic64_t thinints; 171 struct subchannel_id schid;
452 atomic64_t fast_reqs; 172 u32 reserved[1004];
453 173 struct chsc_header response;
454 atomic64_t outbound_cnt; 174 u32:32;
455 atomic64_t inbound_cnt; 175} __attribute__ ((packed));
456#else /* CONFIG_64BIT */ 176
457 atomic_t tl_runs; 177struct qdio_input_q {
458 atomic_t outbound_tl_runs; 178 /* input buffer acknowledgement flag */
459 atomic_t outbound_tl_runs_resched; 179 int polling;
460 atomic_t inbound_tl_runs; 180
461 atomic_t inbound_tl_runs_resched; 181 /* last time of noticing incoming data */
462 atomic_t inbound_thin_tl_runs; 182 u64 timestamp;
463 atomic_t inbound_thin_tl_runs_resched; 183
464 184 /* lock for clearing the acknowledgement */
465 atomic_t siga_outs; 185 spinlock_t lock;
466 atomic_t siga_ins;
467 atomic_t siga_syncs;
468 atomic_t pcis;
469 atomic_t thinints;
470 atomic_t fast_reqs;
471
472 atomic_t outbound_cnt;
473 atomic_t inbound_cnt;
474#endif /* CONFIG_64BIT */
475}; 186};
476 187
477/* unlikely as the later the better */ 188struct qdio_output_q {
478#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) 189 /* failed siga-w attempts*/
479#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ 190 atomic_t busy_siga_counter;
480 qdio_siga_sync(q,~0U,~0U)
481#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
482 qdio_siga_sync(q,~0U,0)
483 191
484#define NOW qdio_get_micros() 192 /* start time of busy condition */
485#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW 193 u64 timestamp;
486#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
487#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
488#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
489 194
490#define MY_MODULE_STRING(x) #x 195 /* PCIs are enabled for the queue */
196 int pci_out_enabled;
491 197
492#ifdef CONFIG_64BIT 198 /* timer to check for more outbound work */
493#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) 199 struct timer_list timer;
494#else /* CONFIG_64BIT */ 200};
495#define QDIO_GET_ADDR(x) ((__u32)(long)x)
496#endif /* CONFIG_64BIT */
497 201
498struct qdio_q { 202struct qdio_q {
499 volatile struct slsb slsb; 203 struct slsb slsb;
204 union {
205 struct qdio_input_q in;
206 struct qdio_output_q out;
207 } u;
500 208
501 char unused[QDIO_MAX_BUFFERS_PER_Q]; 209 /* queue number */
210 int nr;
502 211
503 __u32 * dev_st_chg_ind; 212 /* bitmask of queue number */
213 int mask;
504 214
215 /* input or output queue */
505 int is_input_q; 216 int is_input_q;
506 struct subchannel_id schid;
507 struct ccw_device *cdev;
508
509 unsigned int is_iqdio_q;
510 unsigned int is_thinint_q;
511 217
512 /* bit 0 means queue 0, bit 1 means queue 1, ... */ 218 /* list of thinint input queues */
513 unsigned int mask; 219 struct list_head entry;
514 unsigned int q_no;
515 220
221 /* upper-layer program handler */
516 qdio_handler_t (*handler); 222 qdio_handler_t (*handler);
517 223
518 /* points to the next buffer to be checked for having 224 /*
519 * been processed by the card (outbound) 225 * inbound: next buffer the program should check for
520 * or to the next buffer the program should check for (inbound) */ 226 * outbound: next buffer to check for having been processed
521 volatile int first_to_check; 227 * by the card
522 /* and the last time it was: */ 228 */
523 volatile int last_move_ftc; 229 int first_to_check;
524 230
525 atomic_t number_of_buffers_used; 231 /* first_to_check of the last time */
526 atomic_t polling; 232 int last_move_ftc;
527 233
528 unsigned int siga_in; 234 /* beginning position for calling the program */
529 unsigned int siga_out; 235 int first_to_kick;
530 unsigned int siga_sync;
531 unsigned int siga_sync_done_on_thinints;
532 unsigned int siga_sync_done_on_outb_tis;
533 unsigned int hydra_gives_outbound_pcis;
534 236
535 /* used to save beginning position when calling dd_handlers */ 237 /* number of buffers in use by the adapter */
536 int first_element_to_kick; 238 atomic_t nr_buf_used;
537 239
538 atomic_t use_count; 240 struct qdio_irq *irq_ptr;
539 atomic_t is_in_shutdown;
540
541 void *irq_ptr;
542
543 struct timer_list timer;
544#ifdef QDIO_USE_TIMERS_FOR_POLLING
545 atomic_t timer_already_set;
546 spinlock_t timer_lock;
547#else /* QDIO_USE_TIMERS_FOR_POLLING */
548 struct tasklet_struct tasklet; 241 struct tasklet_struct tasklet;
549#endif /* QDIO_USE_TIMERS_FOR_POLLING */
550 242
551 243 /* error condition during a data transfer */
552 enum qdio_irq_states state;
553
554 /* used to store the error condition during a data transfer */
555 unsigned int qdio_error; 244 unsigned int qdio_error;
556 unsigned int siga_error;
557 unsigned int error_status_flags;
558
559 /* list of interesting queues */
560 volatile struct qdio_q *list_next;
561 volatile struct qdio_q *list_prev;
562 245
563 struct sl *sl; 246 struct sl *sl;
564 volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; 247 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
565 248
566 struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q]; 249 /*
567 250 * Warning: Leave this member at the end so it won't be cleared in
568 unsigned long int_parm; 251 * qdio_fill_qs. A page is allocated under this pointer and used for
569 252 * slib and sl. slib is 2048 bytes big and sl points to offset
570 /*struct { 253 * PAGE_SIZE / 2.
571 int in_bh_check_limit; 254 */
572 int threshold; 255 struct slib *slib;
573 } threshold_classes[QDIO_STATS_CLASSES];*/
574
575 struct {
576 /* inbound: the time to stop polling
577 outbound: the time to kick peer */
578 int threshold; /* the real value */
579
580 /* outbound: last time of do_QDIO
581 inbound: last time of noticing incoming data */
582 /*__u64 last_transfer_times[QDIO_STATS_NUMBER];
583 int last_transfer_index; */
584
585 __u64 last_transfer_time;
586 __u64 busy_start;
587 } timing;
588 atomic_t busy_siga_counter;
589 unsigned int queue_type;
590 unsigned int is_pci_out;
591
592 /* leave this member at the end. won't be cleared in qdio_fill_qs */
593 struct slib *slib; /* a page is allocated under this pointer,
594 sl points into this page, offset PAGE_SIZE/2
595 (after slib) */
596} __attribute__ ((aligned(256))); 256} __attribute__ ((aligned(256)));
597 257
598struct qdio_irq { 258struct qdio_irq {
599 __u32 * volatile dev_st_chg_ind; 259 struct qib qib;
260 u32 *dsci; /* address of device state change indicator */
261 struct ccw_device *cdev;
600 262
601 unsigned long int_parm; 263 unsigned long int_parm;
602 struct subchannel_id schid; 264 struct subchannel_id schid;
603 265 unsigned long sch_token; /* QEBSM facility */
604 unsigned int is_iqdio_irq;
605 unsigned int is_thinint_irq;
606 unsigned int hydra_gives_outbound_pcis;
607 unsigned int sync_done_on_outb_pcis;
608
609 /* QEBSM facility */
610 unsigned int is_qebsm;
611 unsigned long sch_token;
612 266
613 enum qdio_irq_states state; 267 enum qdio_irq_states state;
614 268
615 unsigned int no_input_qs; 269 struct siga_flag siga_flag; /* siga sync information from qdioac */
616 unsigned int no_output_qs;
617 270
618 unsigned char qdioac; 271 int nr_input_qs;
272 int nr_output_qs;
619 273
620 struct ccw1 ccw; 274 struct ccw1 ccw;
621
622 struct ciw equeue; 275 struct ciw equeue;
623 struct ciw aqueue; 276 struct ciw aqueue;
624 277
625 struct qib qib; 278 struct qdio_ssqd_desc ssqd_desc;
626 279
627 void (*original_int_handler) (struct ccw_device *, 280 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
628 unsigned long, struct irb *);
629 281
630 /* leave these four members together at the end. won't be cleared in qdio_fill_irq */ 282 /*
283 * Warning: Leave these members together at the end so they won't be
284 * cleared in qdio_setup_irq.
285 */
631 struct qdr *qdr; 286 struct qdr *qdr;
287 unsigned long chsc_page;
288
632 struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; 289 struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
633 struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; 290 struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
634 struct semaphore setting_up_sema; 291
292 struct mutex setup_mutex;
635}; 293};
636#endif 294
295/* helper functions */
296#define queue_type(q) q->irq_ptr->qib.qfmt
297
298#define is_thinint_irq(irq) \
299 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
300 css_general_characteristics.aif_osa)
301
302/* the highest iqdio queue is used for multicast */
303static inline int multicast_outbound(struct qdio_q *q)
304{
305 return (q->irq_ptr->nr_output_qs > 1) &&
306 (q->nr == q->irq_ptr->nr_output_qs - 1);
307}
308
309static inline unsigned long long get_usecs(void)
310{
311 return monotonic_clock() >> 12;
312}
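The right shift by 12 works because in the s390 TOD clock format bit 51 increments once per microsecond, i.e. the low 12 bits are sub-microsecond resolution. A sketch of the conversion with a fabricated raw clock value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* s390 TOD format: value >> 12 yields microseconds */
	uint64_t tod = (uint64_t)2500 << 12;	/* fabricated raw value */

	printf("%llu usecs\n", (unsigned long long)(tod >> 12));  /* 2500 */
	return 0;
}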
313
314#define pci_out_supported(q) \
315 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
316#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
317
318#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
319#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
320#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
321#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
322#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync)
323#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci)
324
325#define for_each_input_queue(irq_ptr, q, i) \
326 for (i = 0, q = irq_ptr->input_qs[0]; \
327 i < irq_ptr->nr_input_qs; \
328 q = irq_ptr->input_qs[++i])
329#define for_each_output_queue(irq_ptr, q, i) \
330 for (i = 0, q = irq_ptr->output_qs[0]; \
331 i < irq_ptr->nr_output_qs; \
332 q = irq_ptr->output_qs[++i])
333
334#define prev_buf(bufnr) \
335 ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
336#define next_buf(bufnr) \
337 ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
338#define add_buf(bufnr, inc) \
339 ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
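prev_buf avoids a negative intermediate by adding the mask instead of subtracting 1: modulo 128, adding 127 is the same as subtracting one. A quick check:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_MASK 127	/* QDIO_MAX_BUFFERS_PER_Q - 1 */
#define prev_buf(bufnr) (((bufnr) + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)

int main(void)
{
	printf("%d\n", prev_buf(0));	/* 127: wraps backwards */
	printf("%d\n", prev_buf(5));	/* 4 */
	return 0;
}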
340
341/* prototypes for thin interrupt */
342void qdio_sync_after_thinint(struct qdio_q *q);
343int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state);
344void qdio_check_outbound_after_thinint(struct qdio_q *q);
345int qdio_inbound_q_moved(struct qdio_q *q);
346void qdio_kick_inbound_handler(struct qdio_q *q);
347void qdio_stop_polling(struct qdio_q *q);
348int qdio_siga_sync_q(struct qdio_q *q);
349
350void qdio_setup_thinint(struct qdio_irq *irq_ptr);
351int qdio_establish_thinint(struct qdio_irq *irq_ptr);
352void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
353void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
354void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
355void tiqdio_inbound_processing(unsigned long q);
356int tiqdio_allocate_memory(void);
357void tiqdio_free_memory(void);
358int tiqdio_register_thinints(void);
359void tiqdio_unregister_thinints(void);
360
361/* prototypes for setup */
362void qdio_inbound_processing(unsigned long data);
363void qdio_outbound_processing(unsigned long data);
364void qdio_outbound_timer(unsigned long data);
365void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
366 struct irb *irb);
367int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
368 int nr_output_qs);
369void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
370int qdio_setup_irq(struct qdio_initialize *init_data);
371void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
372 struct ccw_device *cdev);
373void qdio_release_memory(struct qdio_irq *irq_ptr);
374int qdio_setup_init(void);
375void qdio_setup_exit(void);
376
377#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
new file mode 100644
index 000000000000..337aa3087a78
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.c
@@ -0,0 +1,240 @@
1/*
2 * drivers/s390/cio/qdio_debug.c
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#include <linux/proc_fs.h>
9#include <linux/seq_file.h>
10#include <linux/debugfs.h>
11#include <asm/qdio.h>
12#include <asm/debug.h>
13#include "qdio_debug.h"
14#include "qdio.h"
15
16debug_info_t *qdio_dbf_setup;
17debug_info_t *qdio_dbf_trace;
18
19static struct dentry *debugfs_root;
20#define MAX_DEBUGFS_QUEUES 32
21static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
22static DEFINE_MUTEX(debugfs_mutex);
23
24void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
25{
26 char dbf_text[20];
27
28 sprintf(dbf_text, "qfmt:%x", init_data->q_format);
29 QDIO_DBF_TEXT0(0, setup, dbf_text);
30 QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8);
31 sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format);
32 QDIO_DBF_TEXT0(0, setup, dbf_text);
33 QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *));
34 QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *));
35 QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *));
36 sprintf(dbf_text, "niq:%4x", init_data->no_input_qs);
37 QDIO_DBF_TEXT0(0, setup, dbf_text);
38 sprintf(dbf_text, "noq:%4x", init_data->no_output_qs);
39 QDIO_DBF_TEXT0(0, setup, dbf_text);
40 QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *));
41 QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *));
42 QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long));
43 QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long));
44 QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *));
45 QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *));
46}
47
48static void qdio_unregister_dbf_views(void)
49{
50 if (qdio_dbf_setup)
51 debug_unregister(qdio_dbf_setup);
52 if (qdio_dbf_trace)
53 debug_unregister(qdio_dbf_trace);
54}
55
56static int qdio_register_dbf_views(void)
57{
58 qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES,
59 QDIO_DBF_SETUP_NR_AREAS,
60 QDIO_DBF_SETUP_LEN);
61 if (!qdio_dbf_setup)
62 goto oom;
63 debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
64 debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);
65
66 qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES,
67 QDIO_DBF_TRACE_NR_AREAS,
68 QDIO_DBF_TRACE_LEN);
69 if (!qdio_dbf_trace)
70 goto oom;
71 debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
72 debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
73 return 0;
74oom:
75 qdio_unregister_dbf_views();
76 return -ENOMEM;
77}
78
79static int qstat_show(struct seq_file *m, void *v)
80{
81 unsigned char state;
82 struct qdio_q *q = m->private;
83 int i;
84
85 if (!q)
86 return 0;
87
88 seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
89 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
90 seq_printf(m, "ftc: %d\n", q->first_to_check);
91 seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
92 seq_printf(m, "polling: %d\n", q->u.in.polling);
93 seq_printf(m, "slsb buffer states:\n");
94
95 qdio_siga_sync_q(q);
96 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
97 get_buf_state(q, i, &state);
98 switch (state) {
99 case SLSB_P_INPUT_NOT_INIT:
100 case SLSB_P_OUTPUT_NOT_INIT:
101 seq_printf(m, "N");
102 break;
103 case SLSB_P_INPUT_PRIMED:
104 case SLSB_CU_OUTPUT_PRIMED:
105 seq_printf(m, "+");
106 break;
107 case SLSB_P_INPUT_ACK:
108 seq_printf(m, "A");
109 break;
110 case SLSB_P_INPUT_ERROR:
111 case SLSB_P_OUTPUT_ERROR:
112 seq_printf(m, "x");
113 break;
114 case SLSB_CU_INPUT_EMPTY:
115 case SLSB_P_OUTPUT_EMPTY:
116 seq_printf(m, "-");
117 break;
118 case SLSB_P_INPUT_HALTED:
119 case SLSB_P_OUTPUT_HALTED:
120 seq_printf(m, ".");
121 break;
122 default:
123 seq_printf(m, "?");
124 }
125		if (i == 63)	/* split the 128 states into two lines of 64 */
126 seq_printf(m, "\n");
127 }
128 seq_printf(m, "\n");
129 return 0;
130}
131
132static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
133 size_t count, loff_t *off)
134{
135 struct seq_file *seq = file->private_data;
136 struct qdio_q *q = seq->private;
137
138 if (!q)
139 return 0;
140
141 if (q->is_input_q)
142 xchg(q->irq_ptr->dsci, 1);
143 local_bh_disable();
144 tasklet_schedule(&q->tasklet);
145 local_bh_enable();
146 return count;
147}
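
/*
 * Usage sketch (mount point assumed): with debugfs at /sys/kernel/debug,
 * every queue appears as /sys/kernel/debug/qdio_queues/<bus id>_input_<n>
 * or <bus id>_output_<n> (see get_queue_name below). Reading the file
 * dumps the queue state via qstat_show; writing anything to it sets the
 * device state change indicator (input queues) and reschedules the
 * queue tasklet.
 */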
148
149static int qstat_seq_open(struct inode *inode, struct file *filp)
150{
151 return single_open(filp, qstat_show,
152 filp->f_path.dentry->d_inode->i_private);
153}
154
155static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
156{
157	/* no memset needed - sprintf() null-terminates name below */
158 sprintf(name, "%s", cdev->dev.bus_id);
159 if (q->is_input_q)
160 sprintf(name + strlen(name), "_input");
161 else
162 sprintf(name + strlen(name), "_output");
163 sprintf(name + strlen(name), "_%d", q->nr);
164}
165
166static void remove_debugfs_entry(struct qdio_q *q)
167{
168 int i;
169
170 for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
171 if (!debugfs_queues[i])
172 continue;
173 if (debugfs_queues[i]->d_inode->i_private == q) {
174 debugfs_remove(debugfs_queues[i]);
175 debugfs_queues[i] = NULL;
176 }
177 }
178}
179
180static struct file_operations debugfs_fops = {
181 .owner = THIS_MODULE,
182 .open = qstat_seq_open,
183 .read = seq_read,
184 .write = qstat_seq_write,
185 .llseek = seq_lseek,
186 .release = single_release,
187};
188
189static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
190{
191 int i = 0;
192 char name[40];
193
194 while (debugfs_queues[i] != NULL) {
195 i++;
196 if (i >= MAX_DEBUGFS_QUEUES)
197 return;
198 }
199 get_queue_name(q, cdev, name);
200 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
201 debugfs_root, q, &debugfs_fops);
202}
203
204void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
205{
206 struct qdio_q *q;
207 int i;
208
209 mutex_lock(&debugfs_mutex);
210 for_each_input_queue(irq_ptr, q, i)
211 setup_debugfs_entry(q, cdev);
212 for_each_output_queue(irq_ptr, q, i)
213 setup_debugfs_entry(q, cdev);
214 mutex_unlock(&debugfs_mutex);
215}
216
217void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
218{
219 struct qdio_q *q;
220 int i;
221
222 mutex_lock(&debugfs_mutex);
223 for_each_input_queue(irq_ptr, q, i)
224 remove_debugfs_entry(q);
225 for_each_output_queue(irq_ptr, q, i)
226 remove_debugfs_entry(q);
227 mutex_unlock(&debugfs_mutex);
228}
229
230int __init qdio_debug_init(void)
231{
232 debugfs_root = debugfs_create_dir("qdio_queues", NULL);
233 return qdio_register_dbf_views();
234}
235
236void qdio_debug_exit(void)
237{
238 debugfs_remove(debugfs_root);
239 qdio_unregister_dbf_views();
240}
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
new file mode 100644
index 000000000000..8484b83698e1
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.h
@@ -0,0 +1,91 @@
1/*
2 * drivers/s390/cio/qdio_debug.h
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#ifndef QDIO_DEBUG_H
9#define QDIO_DEBUG_H
10
11#include <asm/debug.h>
12#include <asm/qdio.h>
13#include "qdio.h"
14
15#define QDIO_DBF_HEX(ex, name, level, addr, len) \
16 do { \
17 if (ex) \
18 debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \
19 else \
20 debug_event(qdio_dbf_##name, level, (void *)(addr), len); \
21 } while (0)
22#define QDIO_DBF_TEXT(ex, name, level, text) \
23 do { \
24 if (ex) \
25 debug_text_exception(qdio_dbf_##name, level, text); \
26 else \
27 debug_text_event(qdio_dbf_##name, level, text); \
28 } while (0)
29
30#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len)
31#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len)
32#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len)
33
34#ifdef CONFIG_QDIO_DEBUG
35#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len)
36#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len)
37#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len)
38#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len)
39#else
40#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0)
41#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0)
42#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0)
43#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0)
44#endif /* CONFIG_QDIO_DEBUG */
45
46#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text)
47#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text)
48#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text)
49
50#ifdef CONFIG_QDIO_DEBUG
51#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text)
52#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text)
53#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text)
54#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text)
55#else
56#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0)
57#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0)
58#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0)
59#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0)
60#endif /* CONFIG_QDIO_DEBUG */
61
62/* s390dbf views */
63#define QDIO_DBF_SETUP_LEN 8
64#define QDIO_DBF_SETUP_PAGES 4
65#define QDIO_DBF_SETUP_NR_AREAS 1
66
67#define QDIO_DBF_TRACE_LEN 8
68#define QDIO_DBF_TRACE_NR_AREAS 2
69
70#ifdef CONFIG_QDIO_DEBUG
71#define QDIO_DBF_TRACE_PAGES 16
72#define QDIO_DBF_SETUP_LEVEL 6
73#define QDIO_DBF_TRACE_LEVEL 4
74#else /* !CONFIG_QDIO_DEBUG */
75#define QDIO_DBF_TRACE_PAGES 4
76#define QDIO_DBF_SETUP_LEVEL 2
77#define QDIO_DBF_TRACE_LEVEL 2
78#endif /* CONFIG_QDIO_DEBUG */
79
80extern debug_info_t *qdio_dbf_setup;
81extern debug_info_t *qdio_dbf_trace;
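
/*
 * A minimal usage sketch (illustration only): levels 0-2 are always
 * compiled in, levels 3-6 only with CONFIG_QDIO_DEBUG; the first macro
 * argument selects a plain event (0) or an exception entry (1).
 */
static inline void qdio_dbf_usage_sketch(void)
{
	QDIO_DBF_TEXT0(0, setup, "example!");	/* always traced */
	QDIO_DBF_TEXT4(1, trace, "dbgonly");	/* no-op without CONFIG_QDIO_DEBUG */
}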
82
83void qdio_allocate_do_dbf(struct qdio_initialize *init_data);
84void debug_print_bstat(struct qdio_q *q);
85void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
86 struct ccw_device *cdev);
87void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
88 struct ccw_device *cdev);
89int qdio_debug_init(void);
90void qdio_debug_exit(void);
91#endif
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
new file mode 100644
index 000000000000..d10c73cc1688
--- /dev/null
+++ b/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1755 @@
1/*
2 * linux/drivers/s390/cio/qdio_main.c
3 *
4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
5 *
6 * Copyright 2000,2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Jan Glauber <jang@linux.vnet.ibm.com>
9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
10 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/delay.h>
16#include <asm/atomic.h>
17#include <asm/debug.h>
18#include <asm/qdio.h>
19
20#include "cio.h"
21#include "css.h"
22#include "device.h"
23#include "qdio.h"
24#include "qdio_debug.h"
25#include "qdio_perf.h"
26
27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
28 "Jan Glauber <jang@linux.vnet.ibm.com>");
29MODULE_DESCRIPTION("QDIO base support");
30MODULE_LICENSE("GPL");
31
32static inline int do_siga_sync(struct subchannel_id schid,
33 unsigned int out_mask, unsigned int in_mask)
34{
35 register unsigned long __fc asm ("0") = 2;
36 register struct subchannel_id __schid asm ("1") = schid;
37 register unsigned long out asm ("2") = out_mask;
38 register unsigned long in asm ("3") = in_mask;
39 int cc;
40
41 asm volatile(
42 " siga 0\n"
43 " ipm %0\n"
44 " srl %0,28\n"
45 : "=d" (cc)
46 : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
47 return cc;
48}
49
50static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
51{
52 register unsigned long __fc asm ("0") = 1;
53 register struct subchannel_id __schid asm ("1") = schid;
54 register unsigned long __mask asm ("2") = mask;
55 int cc;
56
57 asm volatile(
58 " siga 0\n"
59 " ipm %0\n"
60 " srl %0,28\n"
61 : "=d" (cc)
62 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
63 return cc;
64}
65
66/**
67 * do_siga_output - perform SIGA-w/wt function
68 * @schid: subchannel id or in case of QEBSM the subchannel token
69 * @mask: which output queues to process
70 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
71 * @fc: function code to perform
72 *
73 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
74 * Note: For IQDIO unicast queues only the highest priority queue is processed.
75 */
76static inline int do_siga_output(unsigned long schid, unsigned long mask,
77 u32 *bb, unsigned int fc)
78{
79 register unsigned long __fc asm("0") = fc;
80 register unsigned long __schid asm("1") = schid;
81 register unsigned long __mask asm("2") = mask;
82 int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
83
84 asm volatile(
85 " siga 0\n"
86 "0: ipm %0\n"
87 " srl %0,28\n"
88 "1:\n"
89 EX_TABLE(0b, 1b)
90 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
91 : : "cc", "memory");
92 *bb = ((unsigned int) __fc) >> 31;
93 return cc;
94}
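
/*
 * A minimal sketch (illustration only) of the condition-code extraction
 * used by the SIGA wrappers above: "ipm" inserts the cc into bits 2-3 of
 * the register (IBM bit numbering) and "srl ...,28" shifts it down, so
 * the cc ends up in the two least-significant bits.
 */
static inline int cc_extraction_sketch(void)
{
	int cc;

	asm volatile(
		"	cr	0,0\n"	/* comparing a register with itself sets cc 0 */
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : : "cc");
	return cc;	/* always 0 here */
}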
95
96static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
97{
98 char dbf_text[15];
99
100 /* all done or next buffer state different */
101 if (ccq == 0 || ccq == 32)
102 return 0;
103 /* not all buffers processed */
104 if (ccq == 96 || ccq == 97)
105 return 1;
106 /* notify devices immediately */
107 sprintf(dbf_text, "%d", ccq);
108 QDIO_DBF_TEXT2(1, trace, dbf_text);
109 return -EIO;
110}
111
112/**
113 * qdio_do_eqbs - extract buffer states for QEBSM
114 * @q: queue to manipulate
115 * @state: state of the extracted buffers
116 * @start: buffer number to start at
117 * @count: count of buffers to examine
118 *
119 * Returns the number of successfully extracted equal buffer states.
120 * Stops processing if a state is different from the last buffer's state.
121 */
122static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
123 int start, int count)
124{
125 unsigned int ccq = 0;
126 int tmp_count = count, tmp_start = start;
127 int nr = q->nr;
128 int rc;
129 char dbf_text[15];
130
131 BUG_ON(!q->irq_ptr->sch_token);
132
133 if (!q->is_input_q)
134 nr += q->irq_ptr->nr_input_qs;
135again:
136 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
137 rc = qdio_check_ccq(q, ccq);
138
139 /* At least one buffer was processed, return and extract the remaining
140 * buffers later.
141 */
142 if ((ccq == 96) && (count != tmp_count))
143 return (count - tmp_count);
144 if (rc == 1) {
145 QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
146 goto again;
147 }
148
149 if (rc < 0) {
150 QDIO_DBF_TEXT2(1, trace, "eqberr");
151 sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
152 QDIO_DBF_TEXT2(1, trace, dbf_text);
153 q->handler(q->irq_ptr->cdev,
154 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
155 0, -1, -1, q->irq_ptr->int_parm);
156 return 0;
157 }
158 return count - tmp_count;
159}
160
161/**
162 * qdio_do_sqbs - set buffer states for QEBSM
163 * @q: queue to manipulate
164 * @state: new state of the buffers
165 * @start: first buffer number to change
166 * @count: how many buffers to change
167 *
168 * Returns the number of successfully changed buffers.
169 * Retries until the specified count of buffer states is set or an
170 * error occurs.
171 */
172static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
173 int count)
174{
175 unsigned int ccq = 0;
176 int tmp_count = count, tmp_start = start;
177 int nr = q->nr;
178 int rc;
179 char dbf_text[15];
180
181 BUG_ON(!q->irq_ptr->sch_token);
182
183 if (!q->is_input_q)
184 nr += q->irq_ptr->nr_input_qs;
185again:
186 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
187 rc = qdio_check_ccq(q, ccq);
188 if (rc == 1) {
189 QDIO_DBF_TEXT5(1, trace, "sqAGAIN");
190 goto again;
191 }
192 if (rc < 0) {
193 QDIO_DBF_TEXT3(1, trace, "sqberr");
194 sprintf(dbf_text, "%2x,%2x", count, tmp_count);
195 QDIO_DBF_TEXT3(1, trace, dbf_text);
196 sprintf(dbf_text, "%d,%d", ccq, nr);
197 QDIO_DBF_TEXT3(1, trace, dbf_text);
198
199 q->handler(q->irq_ptr->cdev,
200 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
201 0, -1, -1, q->irq_ptr->int_parm);
202 return 0;
203 }
204 WARN_ON(tmp_count);
205 return count - tmp_count;
206}
207
208/* returns number of examined buffers and their common state in *state */
209static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
210 unsigned char *state, unsigned int count)
211{
212 unsigned char __state = 0;
213 int i;
214
215 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
216 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
217
218 if (is_qebsm(q))
219 return qdio_do_eqbs(q, state, bufnr, count);
220
221 for (i = 0; i < count; i++) {
222 if (!__state)
223 __state = q->slsb.val[bufnr];
224 else if (q->slsb.val[bufnr] != __state)
225 break;
226 bufnr = next_buf(bufnr);
227 }
228 *state = __state;
229 return i;
230}
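
/*
 * Index arithmetic assumed above (illustration only; the real helpers
 * live in qdio.h): the queue is a ring of QDIO_MAX_BUFFERS_PER_Q (128)
 * buffers, so indices wrap by masking:
 *
 *	next = (bufnr + 1) & QDIO_MAX_BUFFERS_MASK;
 *	stop = (bufnr + count) & QDIO_MAX_BUFFERS_MASK;
 */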
231
232inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
233 unsigned char *state)
234{
235 return get_buf_states(q, bufnr, state, 1);
236}
237
238/* wrap-around safe setting of slsb states, returns number of changed buffers */
239static inline int set_buf_states(struct qdio_q *q, int bufnr,
240 unsigned char state, int count)
241{
242 int i;
243
244 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
245 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
246
247 if (is_qebsm(q))
248 return qdio_do_sqbs(q, state, bufnr, count);
249
250 for (i = 0; i < count; i++) {
251 xchg(&q->slsb.val[bufnr], state);
252 bufnr = next_buf(bufnr);
253 }
254 return count;
255}
256
257static inline int set_buf_state(struct qdio_q *q, int bufnr,
258 unsigned char state)
259{
260 return set_buf_states(q, bufnr, state, 1);
261}
262
263/* set slsb states to initial state */
264void qdio_init_buf_states(struct qdio_irq *irq_ptr)
265{
266 struct qdio_q *q;
267 int i;
268
269 for_each_input_queue(irq_ptr, q, i)
270 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
271 QDIO_MAX_BUFFERS_PER_Q);
272 for_each_output_queue(irq_ptr, q, i)
273 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
274 QDIO_MAX_BUFFERS_PER_Q);
275}
276
277static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
278 unsigned int input)
279{
280 int cc;
281
282 if (!need_siga_sync(q))
283 return 0;
284
285 qdio_perf_stat_inc(&perf_stats.siga_sync);
286
287 cc = do_siga_sync(q->irq_ptr->schid, output, input);
288 if (cc) {
289 QDIO_DBF_TEXT4(0, trace, "sigasync");
290 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
291		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int));
292 }
293 return cc;
294}
295
296inline int qdio_siga_sync_q(struct qdio_q *q)
297{
298 if (q->is_input_q)
299 return qdio_siga_sync(q, 0, q->mask);
300 else
301 return qdio_siga_sync(q, q->mask, 0);
302}
303
304static inline int qdio_siga_sync_out(struct qdio_q *q)
305{
306 return qdio_siga_sync(q, ~0U, 0);
307}
308
309static inline int qdio_siga_sync_all(struct qdio_q *q)
310{
311 return qdio_siga_sync(q, ~0U, ~0U);
312}
313
314static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315{
316 unsigned int fc = 0;
317 unsigned long schid;
318
319 if (!is_qebsm(q))
320 schid = *((u32 *)&q->irq_ptr->schid);
321 else {
322 schid = q->irq_ptr->sch_token;
323 fc |= 0x80;
324 }
325 return do_siga_output(schid, q->mask, busy_bit, fc);
326}
327
328static int qdio_siga_output(struct qdio_q *q)
329{
330 int cc;
331 u32 busy_bit;
332 u64 start_time = 0;
333
334 QDIO_DBF_TEXT5(0, trace, "sigaout");
335 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
336
337 qdio_perf_stat_inc(&perf_stats.siga_out);
338again:
339 cc = qdio_do_siga_output(q, &busy_bit);
340 if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
341 if (!start_time)
342 start_time = get_usecs();
343 else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
344 goto again;
345 }
346
347 if (cc == 2 && busy_bit)
348 cc |= QDIO_ERROR_SIGA_BUSY;
349 if (cc)
350		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int));
351 return cc;
352}
353
354static inline int qdio_siga_input(struct qdio_q *q)
355{
356 int cc;
357
358 QDIO_DBF_TEXT4(0, trace, "sigain");
359 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
360
361 qdio_perf_stat_inc(&perf_stats.siga_in);
362
363 cc = do_siga_input(q->irq_ptr->schid, q->mask);
364 if (cc)
365		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int));
366 return cc;
367}
368
369/* called from thinint inbound handler */
370void qdio_sync_after_thinint(struct qdio_q *q)
371{
372 if (pci_out_supported(q)) {
373 if (need_siga_sync_thinint(q))
374 qdio_siga_sync_all(q);
375 else if (need_siga_sync_out_thinint(q))
376 qdio_siga_sync_out(q);
377 } else
378 qdio_siga_sync_q(q);
379}
380
381inline void qdio_stop_polling(struct qdio_q *q)
382{
383 spin_lock_bh(&q->u.in.lock);
384 if (!q->u.in.polling) {
385 spin_unlock_bh(&q->u.in.lock);
386 return;
387 }
388 q->u.in.polling = 0;
389 qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
390
391 /* show the card that we are not polling anymore */
392 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
393 spin_unlock_bh(&q->u.in.lock);
394}
395
396static void announce_buffer_error(struct qdio_q *q)
397{
398 char dbf_text[15];
399
400 if (q->is_input_q)
401 QDIO_DBF_TEXT3(1, trace, "inperr");
402 else
403 QDIO_DBF_TEXT3(0, trace, "outperr");
404
405 sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
406 q->sbal[q->first_to_check]->element[14].flags,
407 q->sbal[q->first_to_check]->element[15].flags);
408 QDIO_DBF_TEXT3(1, trace, dbf_text);
409 QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);
410
411 q->qdio_error = QDIO_ERROR_SLSB_STATE;
412}
413
414static int get_inbound_buffer_frontier(struct qdio_q *q)
415{
416 int count, stop;
417 unsigned char state;
418
419 /*
420	 * If we are still polling don't update last_move_ftc, keep the
421	 * previously ACK'ed buffer there.
422 */
423 if (!q->u.in.polling)
424 q->last_move_ftc = q->first_to_check;
425
426 /*
427 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
428 * would return 0.
429 */
430 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
431 stop = add_buf(q->first_to_check, count);
432
433 /*
434	 * No siga-sync here, as a PCI interrupt or the thin interrupt
435	 * handler will sync the queues.
436 */
437
438 /* need to set count to 1 for non-qebsm */
439 if (!is_qebsm(q))
440 count = 1;
441
442check_next:
443 if (q->first_to_check == stop)
444 goto out;
445
446 count = get_buf_states(q, q->first_to_check, &state, count);
447 if (!count)
448 goto out;
449
450 switch (state) {
451 case SLSB_P_INPUT_PRIMED:
452 QDIO_DBF_TEXT5(0, trace, "inptprim");
453
454 /*
455 * Only ACK the first buffer. The ACK will be removed in
456 * qdio_stop_polling.
457 */
458 if (q->u.in.polling)
459 state = SLSB_P_INPUT_NOT_INIT;
460 else {
461 q->u.in.polling = 1;
462 state = SLSB_P_INPUT_ACK;
463 }
464 set_buf_state(q, q->first_to_check, state);
465
466 /*
467 * Need to change all PRIMED buffers to NOT_INIT, otherwise
468		 * we're losing initiative in the thinint code.
469 */
470 if (count > 1)
471 set_buf_states(q, next_buf(q->first_to_check),
472 SLSB_P_INPUT_NOT_INIT, count - 1);
473
474 /*
475 * No siga-sync needed for non-qebsm here, as the inbound queue
476		 * will be synced on the next siga-r; alternatively,
477		 * tiqdio_is_inbound_q_done will do the siga-sync.
478 */
479 q->first_to_check = add_buf(q->first_to_check, count);
480 atomic_sub(count, &q->nr_buf_used);
481 goto check_next;
482 case SLSB_P_INPUT_ERROR:
483 announce_buffer_error(q);
484 /* process the buffer, the upper layer will take care of it */
485 q->first_to_check = add_buf(q->first_to_check, count);
486 atomic_sub(count, &q->nr_buf_used);
487 break;
488 case SLSB_CU_INPUT_EMPTY:
489 case SLSB_P_INPUT_NOT_INIT:
490 case SLSB_P_INPUT_ACK:
491 QDIO_DBF_TEXT5(0, trace, "inpnipro");
492 break;
493 default:
494 BUG();
495 }
496out:
497 QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
498 return q->first_to_check;
499}
500
501int qdio_inbound_q_moved(struct qdio_q *q)
502{
503 int bufnr;
504
505 bufnr = get_inbound_buffer_frontier(q);
506
507 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
508 if (!need_siga_sync(q) && !pci_out_supported(q))
509 q->u.in.timestamp = get_usecs();
510
511 QDIO_DBF_TEXT4(0, trace, "inhasmvd");
512 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
513 return 1;
514 } else
515 return 0;
516}
517
518static int qdio_inbound_q_done(struct qdio_q *q)
519{
520 unsigned char state;
521#ifdef CONFIG_QDIO_DEBUG
522 char dbf_text[15];
523#endif
524
525 if (!atomic_read(&q->nr_buf_used))
526 return 1;
527
528 /*
529	 * We need this siga-sync for synchronization with the adapter, as it
530 * does a kind of PCI avoidance.
531 */
532 qdio_siga_sync_q(q);
533
534 get_buf_state(q, q->first_to_check, &state);
535 if (state == SLSB_P_INPUT_PRIMED)
536 /* we got something to do */
537 return 0;
538
539 /* on VM, we don't poll, so the q is always done here */
540 if (need_siga_sync(q) || pci_out_supported(q))
541 return 1;
542
543 /*
544	 * At this point we know that the inbound first_to_check
545 * has (probably) not moved (see qdio_inbound_processing).
546 */
547 if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
548#ifdef CONFIG_QDIO_DEBUG
549 QDIO_DBF_TEXT4(0, trace, "inqisdon");
550 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
551 sprintf(dbf_text, "pf%02x", q->first_to_check);
552 QDIO_DBF_TEXT4(0, trace, dbf_text);
553#endif /* CONFIG_QDIO_DEBUG */
554 return 1;
555 } else {
556#ifdef CONFIG_QDIO_DEBUG
557 QDIO_DBF_TEXT4(0, trace, "inqisntd");
558 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
559 sprintf(dbf_text, "pf%02x", q->first_to_check);
560 QDIO_DBF_TEXT4(0, trace, dbf_text);
561#endif /* CONFIG_QDIO_DEBUG */
562 return 0;
563 }
564}
565
566void qdio_kick_inbound_handler(struct qdio_q *q)
567{
568 int count, start, end;
569#ifdef CONFIG_QDIO_DEBUG
570 char dbf_text[15];
571#endif
572
573 qdio_perf_stat_inc(&perf_stats.inbound_handler);
574
575 start = q->first_to_kick;
576 end = q->first_to_check;
577 if (end >= start)
578 count = end - start;
579 else
580 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
581
582#ifdef CONFIG_QDIO_DEBUG
583 sprintf(dbf_text, "s=%2xc=%2x", start, count);
584 QDIO_DBF_TEXT4(0, trace, dbf_text);
585#endif /* CONFIG_QDIO_DEBUG */
586
587 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
588 return;
589
590 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
591 start, count, q->irq_ptr->int_parm);
592
593 /* for the next time */
594 q->first_to_kick = q->first_to_check;
595 q->qdio_error = 0;
596}
597
598static void __qdio_inbound_processing(struct qdio_q *q)
599{
600 qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
601again:
602 if (!qdio_inbound_q_moved(q))
603 return;
604
605 qdio_kick_inbound_handler(q);
606
607 if (!qdio_inbound_q_done(q))
608 /* means poll time is not yet over */
609 goto again;
610
611 qdio_stop_polling(q);
612 /*
613 * We need to check again to not lose initiative after
614 * resetting the ACK state.
615 */
616 if (!qdio_inbound_q_done(q))
617 goto again;
618}
619
620/* inbound tasklet */
621void qdio_inbound_processing(unsigned long data)
622{
623 struct qdio_q *q = (struct qdio_q *)data;
624 __qdio_inbound_processing(q);
625}
626
627static int get_outbound_buffer_frontier(struct qdio_q *q)
628{
629 int count, stop;
630 unsigned char state;
631
632 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
633 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
634 qdio_siga_sync_q(q);
635
636 /*
637	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
638 * would return 0.
639 */
640 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
641 stop = add_buf(q->first_to_check, count);
642
643 /* need to set count to 1 for non-qebsm */
644 if (!is_qebsm(q))
645 count = 1;
646
647check_next:
648 if (q->first_to_check == stop)
649 return q->first_to_check;
650
651 count = get_buf_states(q, q->first_to_check, &state, count);
652 if (!count)
653 return q->first_to_check;
654
655 switch (state) {
656 case SLSB_P_OUTPUT_EMPTY:
657 /* the adapter got it */
658 QDIO_DBF_TEXT5(0, trace, "outpempt");
659
660 atomic_sub(count, &q->nr_buf_used);
661 q->first_to_check = add_buf(q->first_to_check, count);
662 /*
663 * We fetch all buffer states at once. get_buf_states may
664		 * return fewer buffers than requested. For QEBSM we do not loop.
665 */
666 if (is_qebsm(q))
667 break;
668 goto check_next;
669 case SLSB_P_OUTPUT_ERROR:
670 announce_buffer_error(q);
671 /* process the buffer, the upper layer will take care of it */
672 q->first_to_check = add_buf(q->first_to_check, count);
673 atomic_sub(count, &q->nr_buf_used);
674 break;
675 case SLSB_CU_OUTPUT_PRIMED:
676 /* the adapter has not fetched the output yet */
677 QDIO_DBF_TEXT5(0, trace, "outpprim");
678 break;
679 case SLSB_P_OUTPUT_NOT_INIT:
680 case SLSB_P_OUTPUT_HALTED:
681 break;
682 default:
683 BUG();
684 }
685 return q->first_to_check;
686}
687
688/* all buffers processed? */
689static inline int qdio_outbound_q_done(struct qdio_q *q)
690{
691 return atomic_read(&q->nr_buf_used) == 0;
692}
693
694static inline int qdio_outbound_q_moved(struct qdio_q *q)
695{
696 int bufnr;
697
698 bufnr = get_outbound_buffer_frontier(q);
699
700 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
701 q->last_move_ftc = bufnr;
702 QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
703 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
704 return 1;
705 } else
706 return 0;
707}
708
709/*
710 * z/VM may present cc=2 with the busy bit set on SIGA-write while it
711 * reconfigures its Guest LAN (only in iqdio mode; otherwise qdio is
712 * asynchronous, and cc=2 with the busy bit would take the queues down
713 * immediately).
714 *
715 * Therefore, if such a condition occurs, qdio_siga_output retries
716 * constantly for a short time. If the condition persists, it
717 * increases the busy_siga_counter, saves the timestamp and
718 * schedules the queue for later processing. qdio_outbound_processing
719 * examines the counter: if it is non-zero, it calls qdio_kick_outbound_q
720 * as many times as the counter indicates, attempting further SIGA
721 * instructions. Each successful SIGA decreases the counter, while a
722 * failing SIGA leaves it unchanged. After some time without any
723 * movement, qdio_kick_outbound_q finally fails and reflects the
724 * corresponding error codes, so that the upper layer module is
725 * called and takes the queues down.
726 *
727 * Note that this is a change from the original HiperSockets design
728 * (which said cc=2 with the busy bit means take the queues down), but
729 * back then Guest LAN did not exist... excessive cc=2/busy-bit
730 * conditions will still take the queues down, just with a higher
731 * threshold to accommodate the Guest LAN environment.
732 *
733 * Called from the outbound tasklet and the do_QDIO handler.
734 */
735static void qdio_kick_outbound_q(struct qdio_q *q)
736{
737 int rc;
738#ifdef CONFIG_QDIO_DEBUG
739 char dbf_text[15];
740
741 QDIO_DBF_TEXT5(0, trace, "kickoutq");
742 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
743#endif /* CONFIG_QDIO_DEBUG */
744
745 if (!need_siga_out(q))
746 return;
747
748 rc = qdio_siga_output(q);
749 switch (rc) {
750 case 0:
751		/* went smoothly this time, reset timestamp */
752 q->u.out.timestamp = 0;
753
754 /* TODO: improve error handling for CC=0 case */
755#ifdef CONFIG_QDIO_DEBUG
756 QDIO_DBF_TEXT3(0, trace, "cc2reslv");
757 sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
758 atomic_read(&q->u.out.busy_siga_counter));
759 QDIO_DBF_TEXT3(0, trace, dbf_text);
760#endif /* CONFIG_QDIO_DEBUG */
761 break;
762 /* cc=2 and busy bit */
763 case (2 | QDIO_ERROR_SIGA_BUSY):
764 atomic_inc(&q->u.out.busy_siga_counter);
765
766 /* if the last siga was successful, save timestamp here */
767 if (!q->u.out.timestamp)
768 q->u.out.timestamp = get_usecs();
769
770 /* if we're in time, don't touch qdio_error */
771 if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
772 tasklet_schedule(&q->tasklet);
773 break;
774 }
775 QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
776#ifdef CONFIG_QDIO_DEBUG
777 sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
778 atomic_read(&q->u.out.busy_siga_counter));
779 QDIO_DBF_TEXT3(0, trace, dbf_text);
780#endif /* CONFIG_QDIO_DEBUG */
781	default:	/* also reached by fall-through after busy-bit give-up */
782 /* for plain cc=1, 2 or 3 */
783 q->qdio_error = rc;
784 }
785}
786
787static void qdio_kick_outbound_handler(struct qdio_q *q)
788{
789 int start, end, count;
790#ifdef CONFIG_QDIO_DEBUG
791 char dbf_text[15];
792#endif
793
794 start = q->first_to_kick;
795 end = q->last_move_ftc;
796 if (end >= start)
797 count = end - start;
798 else
799 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
800
801#ifdef CONFIG_QDIO_DEBUG
802 QDIO_DBF_TEXT4(0, trace, "kickouth");
803 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
804
805 sprintf(dbf_text, "s=%2xc=%2x", start, count);
806 QDIO_DBF_TEXT4(0, trace, dbf_text);
807#endif /* CONFIG_QDIO_DEBUG */
808
809 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
810 return;
811
812 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
813 q->irq_ptr->int_parm);
814
815 /* for the next time: */
816 q->first_to_kick = q->last_move_ftc;
817 q->qdio_error = 0;
818}
819
820static void __qdio_outbound_processing(struct qdio_q *q)
821{
822 int siga_attempts;
823
824 qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
825
826 /* see comment in qdio_kick_outbound_q */
827 siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
828 while (siga_attempts--) {
829 atomic_dec(&q->u.out.busy_siga_counter);
830 qdio_kick_outbound_q(q);
831 }
832
833 BUG_ON(atomic_read(&q->nr_buf_used) < 0);
834
835 if (qdio_outbound_q_moved(q))
836 qdio_kick_outbound_handler(q);
837
838 if (queue_type(q) == QDIO_ZFCP_QFMT) {
839 if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
840 tasklet_schedule(&q->tasklet);
841 return;
842 }
843
844 /* bail out for HiperSockets unicast queues */
845 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
846 return;
847
848 if (q->u.out.pci_out_enabled)
849 return;
850
851 /*
852 * Now we know that queue type is either qeth without pci enabled
853 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
854 * EMPTY is noticed and outbound_handler is called after some time.
855 */
856 if (qdio_outbound_q_done(q))
857 del_timer(&q->u.out.timer);
858 else {
859 if (!timer_pending(&q->u.out.timer)) {
860 mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
861 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
862 }
863 }
864}
865
866/* outbound tasklet */
867void qdio_outbound_processing(unsigned long data)
868{
869 struct qdio_q *q = (struct qdio_q *)data;
870 __qdio_outbound_processing(q);
871}
872
873void qdio_outbound_timer(unsigned long data)
874{
875 struct qdio_q *q = (struct qdio_q *)data;
876 tasklet_schedule(&q->tasklet);
877}
878
879/* called from thinint inbound tasklet */
880void qdio_check_outbound_after_thinint(struct qdio_q *q)
881{
882 struct qdio_q *out;
883 int i;
884
885 if (!pci_out_supported(q))
886 return;
887
888 for_each_output_queue(q->irq_ptr, out, i)
889 if (!qdio_outbound_q_done(out))
890 tasklet_schedule(&out->tasklet);
891}
892
893static inline void qdio_set_state(struct qdio_irq *irq_ptr,
894 enum qdio_irq_states state)
895{
896#ifdef CONFIG_QDIO_DEBUG
897 char dbf_text[15];
898
899 QDIO_DBF_TEXT5(0, trace, "newstate");
900 sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state);
901 QDIO_DBF_TEXT5(0, trace, dbf_text);
902#endif /* CONFIG_QDIO_DEBUG */
903
904 irq_ptr->state = state;
905 mb();
906}
907
908static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
909{
910 char dbf_text[15];
911
912 if (irb->esw.esw0.erw.cons) {
913 sprintf(dbf_text, "sens%4x", schid.sch_no);
914 QDIO_DBF_TEXT2(1, trace, dbf_text);
915 QDIO_DBF_HEX0(0, trace, irb, 64);
916 QDIO_DBF_HEX0(0, trace, irb->ecw, 64);
917 }
918}
919
920/* PCI interrupt handler */
921static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
922{
923 int i;
924 struct qdio_q *q;
925
926 qdio_perf_stat_inc(&perf_stats.pci_int);
927
928 for_each_input_queue(irq_ptr, q, i)
929 tasklet_schedule(&q->tasklet);
930
931 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
932 return;
933
934 for_each_output_queue(irq_ptr, q, i) {
935 if (qdio_outbound_q_done(q))
936 continue;
937
938 if (!siga_syncs_out_pci(q))
939 qdio_siga_sync_q(q);
940
941 tasklet_schedule(&q->tasklet);
942 }
943}
944
945static void qdio_handle_activate_check(struct ccw_device *cdev,
946 unsigned long intparm, int cstat, int dstat)
947{
948 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
949 struct qdio_q *q;
950 char dbf_text[15];
951
952 QDIO_DBF_TEXT2(1, trace, "ick2");
953 sprintf(dbf_text, "%s", cdev->dev.bus_id);
954 QDIO_DBF_TEXT2(1, trace, dbf_text);
955 QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
956 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
957 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
958
959 if (irq_ptr->nr_input_qs) {
960 q = irq_ptr->input_qs[0];
961 } else if (irq_ptr->nr_output_qs) {
962 q = irq_ptr->output_qs[0];
963 } else {
964 dump_stack();
965 goto no_handler;
966 }
967 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
968 0, -1, -1, irq_ptr->int_parm);
969no_handler:
970 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
971}
972
973static void qdio_call_shutdown(struct work_struct *work)
974{
975 struct ccw_device_private *priv;
976 struct ccw_device *cdev;
977
978 priv = container_of(work, struct ccw_device_private, kick_work);
979 cdev = priv->cdev;
980 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
981 put_device(&cdev->dev);
982}
983
984static void qdio_int_error(struct ccw_device *cdev)
985{
986 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
987
988 switch (irq_ptr->state) {
989 case QDIO_IRQ_STATE_INACTIVE:
990 case QDIO_IRQ_STATE_CLEANUP:
991 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
992 break;
993 case QDIO_IRQ_STATE_ESTABLISHED:
994 case QDIO_IRQ_STATE_ACTIVE:
995 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
996 if (get_device(&cdev->dev)) {
997 /* Can't call shutdown from interrupt context. */
998 PREPARE_WORK(&cdev->private->kick_work,
999 qdio_call_shutdown);
1000 queue_work(ccw_device_work, &cdev->private->kick_work);
1001 }
1002 break;
1003 default:
1004 WARN_ON(1);
1005 }
1006 wake_up(&cdev->private->wait_q);
1007}
1008
1009static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
1010 int dstat)
1011{
1012 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1013
1014 if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
1015 QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
1016 goto error;
1017 }
1018
1019 if (!(dstat & DEV_STAT_DEV_END)) {
1020 QDIO_DBF_TEXT2(1, setup, "eq:no de");
1021 goto error;
1022 }
1023
1024 if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
1025 QDIO_DBF_TEXT2(1, setup, "eq:badio");
1026 goto error;
1027 }
1028 return 0;
1029error:
1030 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
1031 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
1032 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1033 return 1;
1034}
1035
1036static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
1037 int dstat)
1038{
1039 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1040 char dbf_text[15];
1041
1042 sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
1043 QDIO_DBF_TEXT0(0, setup, dbf_text);
1044 QDIO_DBF_TEXT0(0, trace, dbf_text);
1045
1046 if (!qdio_establish_check_errors(cdev, cstat, dstat))
1047 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
1048}
1049
1050/* qdio interrupt handler */
1051void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1052 struct irb *irb)
1053{
1054 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1055 int cstat, dstat;
1056 char dbf_text[15];
1057
1058 qdio_perf_stat_inc(&perf_stats.qdio_int);
1059
1060 if (!intparm || !irq_ptr) {
1061 sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
1062 QDIO_DBF_TEXT2(1, setup, dbf_text);
1063 return;
1064 }
1065
1066 if (IS_ERR(irb)) {
1067 switch (PTR_ERR(irb)) {
1068 case -EIO:
1069 sprintf(dbf_text, "ierr%4x",
1070 cdev->private->schid.sch_no);
1071 QDIO_DBF_TEXT2(1, setup, dbf_text);
1072 qdio_int_error(cdev);
1073 return;
1074 case -ETIMEDOUT:
1075 sprintf(dbf_text, "qtoh%4x",
1076 cdev->private->schid.sch_no);
1077 QDIO_DBF_TEXT2(1, setup, dbf_text);
1078 qdio_int_error(cdev);
1079 return;
1080 default:
1081 WARN_ON(1);
1082 return;
1083 }
1084 }
1085 qdio_irq_check_sense(irq_ptr->schid, irb);
1086
1087 cstat = irb->scsw.cmd.cstat;
1088 dstat = irb->scsw.cmd.dstat;
1089
1090 switch (irq_ptr->state) {
1091 case QDIO_IRQ_STATE_INACTIVE:
1092 qdio_establish_handle_irq(cdev, cstat, dstat);
1093 break;
1094
1095 case QDIO_IRQ_STATE_CLEANUP:
1096 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1097 break;
1098
1099 case QDIO_IRQ_STATE_ESTABLISHED:
1100 case QDIO_IRQ_STATE_ACTIVE:
1101 if (cstat & SCHN_STAT_PCI) {
1102 qdio_int_handler_pci(irq_ptr);
1103 /* no state change so no need to wake up wait_q */
1104 return;
1105 }
1106 if ((cstat & ~SCHN_STAT_PCI) || dstat) {
1107 qdio_handle_activate_check(cdev, intparm, cstat,
1108 dstat);
1109 break;
1110 }
1111 default:
1112 WARN_ON(1);
1113 }
1114 wake_up(&cdev->private->wait_q);
1115}
1116
1117/**
1118 * qdio_get_ssqd_desc - get qdio subchannel description
1119 * @cdev: ccw device to get description for
1120 *
1121 * Returns a pointer to the saved qdio subchannel description,
1122 * or NULL if qdio is not set up for the device.
1123 */
1124struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev)
1125{
1126 struct qdio_irq *irq_ptr;
1127
1128 QDIO_DBF_TEXT0(0, setup, "getssqd");
1129
1130 irq_ptr = cdev->private->qdio_data;
1131 if (!irq_ptr)
1132 return NULL;
1133
1134 return &irq_ptr->ssqd_desc;
1135}
1136EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
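
/*
 * A minimal usage sketch (illustration only): upper-layer drivers may
 * inspect the saved description after qdio_establish; a NULL return
 * simply means the device is not set up for qdio.
 */
static inline int has_qdio_sketch(struct ccw_device *cdev)
{
	return qdio_get_ssqd_desc(cdev) != NULL;
}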
1137
1138/**
1139 * qdio_cleanup - shutdown queues and free data structures
1140 * @cdev: associated ccw device
1141 * @how: use halt or clear to shutdown
1142 *
1143 * This function calls qdio_shutdown() for @cdev with method @how
1144 * and on success qdio_free() for @cdev.
1145 */
1146int qdio_cleanup(struct ccw_device *cdev, int how)
1147{
1148 struct qdio_irq *irq_ptr;
1149 char dbf_text[15];
1150 int rc;
1151
1152 irq_ptr = cdev->private->qdio_data;
1153 if (!irq_ptr)
1154 return -ENODEV;
1155
1156 sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no);
1157 QDIO_DBF_TEXT1(0, trace, dbf_text);
1158 QDIO_DBF_TEXT0(0, setup, dbf_text);
1159
1160 rc = qdio_shutdown(cdev, how);
1161 if (rc == 0)
1162 rc = qdio_free(cdev);
1163 return rc;
1164}
1165EXPORT_SYMBOL_GPL(qdio_cleanup);
1166
1167static void qdio_shutdown_queues(struct ccw_device *cdev)
1168{
1169 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1170 struct qdio_q *q;
1171 int i;
1172
1173 for_each_input_queue(irq_ptr, q, i)
1174 tasklet_disable(&q->tasklet);
1175
1176 for_each_output_queue(irq_ptr, q, i) {
1177 tasklet_disable(&q->tasklet);
1178 del_timer(&q->u.out.timer);
1179 }
1180}
1181
1182/**
1183 * qdio_shutdown - shut down a qdio subchannel
1184 * @cdev: associated ccw device
1185 * @how: use halt or clear to shutdown
1186 */
1187int qdio_shutdown(struct ccw_device *cdev, int how)
1188{
1189 struct qdio_irq *irq_ptr;
1190 int rc;
1191 unsigned long flags;
1192 char dbf_text[15];
1193
1194 irq_ptr = cdev->private->qdio_data;
1195 if (!irq_ptr)
1196 return -ENODEV;
1197
1198 mutex_lock(&irq_ptr->setup_mutex);
1199 /*
1200 * Subchannel was already shot down. We cannot prevent being called
1201 * twice since cio may trigger a shutdown asynchronously.
1202 */
1203 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1204 mutex_unlock(&irq_ptr->setup_mutex);
1205 return 0;
1206 }
1207
1208 sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no);
1209 QDIO_DBF_TEXT1(0, trace, dbf_text);
1210 QDIO_DBF_TEXT0(0, setup, dbf_text);
1211
1212 tiqdio_remove_input_queues(irq_ptr);
1213 qdio_shutdown_queues(cdev);
1214 qdio_shutdown_debug_entries(irq_ptr, cdev);
1215
1216 /* cleanup subchannel */
1217 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1218
1219 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1220 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
1221 else
1222 /* default behaviour is halt */
1223 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
1224 if (rc) {
1225 sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no);
1226 QDIO_DBF_TEXT0(0, setup, dbf_text);
1227 sprintf(dbf_text, "rc=%d", rc);
1228 QDIO_DBF_TEXT0(0, setup, dbf_text);
1229 goto no_cleanup;
1230 }
1231
1232 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1233 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1234 wait_event_interruptible_timeout(cdev->private->wait_q,
1235 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1236 irq_ptr->state == QDIO_IRQ_STATE_ERR,
1237 10 * HZ);
1238 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1239
1240no_cleanup:
1241 qdio_shutdown_thinint(irq_ptr);
1242
1243 /* restore interrupt handler */
1244 if ((void *)cdev->handler == (void *)qdio_int_handler)
1245 cdev->handler = irq_ptr->orig_handler;
1246 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1247
1248 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1249 mutex_unlock(&irq_ptr->setup_mutex);
1250 module_put(THIS_MODULE);
1251 if (rc)
1252 return rc;
1253 return 0;
1254}
1255EXPORT_SYMBOL_GPL(qdio_shutdown);
1256
1257/**
1258 * qdio_free - free data structures for a qdio subchannel
1259 * @cdev: associated ccw device
1260 */
1261int qdio_free(struct ccw_device *cdev)
1262{
1263 struct qdio_irq *irq_ptr;
1264 char dbf_text[15];
1265
1266 irq_ptr = cdev->private->qdio_data;
1267 if (!irq_ptr)
1268 return -ENODEV;
1269
1270 mutex_lock(&irq_ptr->setup_mutex);
1271
1272 sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no);
1273 QDIO_DBF_TEXT1(0, trace, dbf_text);
1274 QDIO_DBF_TEXT0(0, setup, dbf_text);
1275
1276 cdev->private->qdio_data = NULL;
1277 mutex_unlock(&irq_ptr->setup_mutex);
1278
1279 qdio_release_memory(irq_ptr);
1280 return 0;
1281}
1282EXPORT_SYMBOL_GPL(qdio_free);
1283
1284/**
1285 * qdio_initialize - allocate and establish queues for a qdio subchannel
1286 * @init_data: initialization data
1287 *
1288 * This function first allocates queues via qdio_allocate() and on success
1289 * establishes them via qdio_establish().
1290 */
1291int qdio_initialize(struct qdio_initialize *init_data)
1292{
1293 int rc;
1294 char dbf_text[15];
1295
1296 sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
1297 QDIO_DBF_TEXT0(0, setup, dbf_text);
1298 QDIO_DBF_TEXT0(0, trace, dbf_text);
1299
1300 rc = qdio_allocate(init_data);
1301 if (rc)
1302 return rc;
1303
1304 rc = qdio_establish(init_data);
1305 if (rc)
1306 qdio_free(init_data->cdev);
1307 return rc;
1308}
1309EXPORT_SYMBOL_GPL(qdio_initialize);
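
/*
 * A minimal setup sketch (illustration only, with hypothetical names):
 * the handler signature is inferred from the q->handler calls above and
 * the field names from qdio_allocate_do_dbf; real drivers fill in more
 * fields and pass pre-allocated SBAL address arrays.
 */
static void sketch_handler(struct ccw_device *cdev, unsigned int qdio_err,
			   int queue, int first, int count,
			   unsigned long int_parm)
{
	/* empty or refill the reported buffers, then call do_QDIO */
}

static int sketch_setup(struct ccw_device *cdev, void **in_sbals,
			void **out_sbals)
{
	struct qdio_initialize init_data = {
		.cdev			= cdev,
		.no_input_qs		= 1,
		.no_output_qs		= 1,
		.input_handler		= sketch_handler,
		.output_handler		= sketch_handler,
		.int_parm		= (unsigned long) cdev,
		.input_sbal_addr_array	= in_sbals,
		.output_sbal_addr_array	= out_sbals,
	};

	return qdio_initialize(&init_data);
}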
1310
1311/**
1312 * qdio_allocate - allocate qdio queues and associated data
1313 * @init_data: initialization data
1314 */
1315int qdio_allocate(struct qdio_initialize *init_data)
1316{
1317 struct qdio_irq *irq_ptr;
1318 char dbf_text[15];
1319
1320 sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
1321 QDIO_DBF_TEXT0(0, setup, dbf_text);
1322 QDIO_DBF_TEXT0(0, trace, dbf_text);
1323
1324 if ((init_data->no_input_qs && !init_data->input_handler) ||
1325 (init_data->no_output_qs && !init_data->output_handler))
1326 return -EINVAL;
1327
1328 if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
1329 (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
1330 return -EINVAL;
1331
1332 if ((!init_data->input_sbal_addr_array) ||
1333 (!init_data->output_sbal_addr_array))
1334 return -EINVAL;
1335
1336 qdio_allocate_do_dbf(init_data);
1337
1338 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
1339 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1340 if (!irq_ptr)
1341 goto out_err;
1342 QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
1343 QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *));
1344
1345 mutex_init(&irq_ptr->setup_mutex);
1346
1347 /*
1348 * Allocate a page for the chsc calls in qdio_establish.
1349 * Must be pre-allocated since a zfcp recovery will call
1350 * qdio_establish. In case of low memory and swap on a zfcp disk
1351 * we may not be able to allocate memory otherwise.
1352 */
1353 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
1354 if (!irq_ptr->chsc_page)
1355 goto out_rel;
1356
1357 /* qdr is used in ccw1.cda which is u32 */
1358 irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
1359 if (!irq_ptr->qdr)
1360 goto out_rel;
1361 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
1362
1363 QDIO_DBF_TEXT0(0, setup, "qdr:");
1364 QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *));
1365
1366 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1367 init_data->no_output_qs))
1368 goto out_rel;
1369
1370 init_data->cdev->private->qdio_data = irq_ptr;
1371 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1372 return 0;
1373out_rel:
1374 qdio_release_memory(irq_ptr);
1375out_err:
1376 return -ENOMEM;
1377}
1378EXPORT_SYMBOL_GPL(qdio_allocate);
1379
1380/**
1381 * qdio_establish - establish queues on a qdio subchannel
1382 * @init_data: initialization data
1383 */
1384int qdio_establish(struct qdio_initialize *init_data)
1385{
1386 char dbf_text[20];
1387 struct qdio_irq *irq_ptr;
1388 struct ccw_device *cdev = init_data->cdev;
1389 unsigned long saveflags;
1390 int rc;
1391
1392 irq_ptr = cdev->private->qdio_data;
1393 if (!irq_ptr)
1394 return -ENODEV;
1395
1396 if (cdev->private->state != DEV_STATE_ONLINE)
1397 return -EINVAL;
1398
1399 if (!try_module_get(THIS_MODULE))
1400 return -EINVAL;
1401
1402 sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
1403 QDIO_DBF_TEXT0(0, setup, dbf_text);
1404 QDIO_DBF_TEXT0(0, trace, dbf_text);
1405
1406 mutex_lock(&irq_ptr->setup_mutex);
1407 qdio_setup_irq(init_data);
1408
1409 rc = qdio_establish_thinint(irq_ptr);
1410 if (rc) {
1411 mutex_unlock(&irq_ptr->setup_mutex);
1412 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1413 return rc;
1414 }
1415
1416 /* establish q */
1417 irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
1418 irq_ptr->ccw.flags = CCW_FLAG_SLI;
1419 irq_ptr->ccw.count = irq_ptr->equeue.count;
1420 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
1421
1422 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1423 ccw_device_set_options_mask(cdev, 0);
1424
1425 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1426 if (rc) {
1427 sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no);
1428 QDIO_DBF_TEXT2(1, setup, dbf_text);
1429 sprintf(dbf_text, "eq:rc%4x", rc);
1430 QDIO_DBF_TEXT2(1, setup, dbf_text);
1431 }
1432 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1433
1434 if (rc) {
1435 mutex_unlock(&irq_ptr->setup_mutex);
1436 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1437 return rc;
1438 }
1439
1440 wait_event_interruptible_timeout(cdev->private->wait_q,
1441 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
1442 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
1443
1444 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
1445 mutex_unlock(&irq_ptr->setup_mutex);
1446 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1447 return -EIO;
1448 }
1449
1450 qdio_setup_ssqd_info(irq_ptr);
1451 sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
1452 QDIO_DBF_TEXT2(0, setup, dbf_text);
1453
1454	/* qebsm is now set up if available; initialize the buffer states */
1455 qdio_init_buf_states(irq_ptr);
1456
1457 mutex_unlock(&irq_ptr->setup_mutex);
1458 qdio_print_subchannel_info(irq_ptr, cdev);
1459 qdio_setup_debug_entries(irq_ptr, cdev);
1460 return 0;
1461}
1462EXPORT_SYMBOL_GPL(qdio_establish);
1463
1464/**
1465 * qdio_activate - activate queues on a qdio subchannel
1466 * @cdev: associated cdev
1467 */
1468int qdio_activate(struct ccw_device *cdev)
1469{
1470 struct qdio_irq *irq_ptr;
1471 int rc;
1472 unsigned long saveflags;
1473 char dbf_text[20];
1474
1475 irq_ptr = cdev->private->qdio_data;
1476 if (!irq_ptr)
1477 return -ENODEV;
1478
1479 if (cdev->private->state != DEV_STATE_ONLINE)
1480 return -EINVAL;
1481
1482 mutex_lock(&irq_ptr->setup_mutex);
1483 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1484 rc = -EBUSY;
1485 goto out;
1486 }
1487
1488 sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no);
1489 QDIO_DBF_TEXT2(0, setup, dbf_text);
1490 QDIO_DBF_TEXT2(0, trace, dbf_text);
1491
1492 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
1493 irq_ptr->ccw.flags = CCW_FLAG_SLI;
1494 irq_ptr->ccw.count = irq_ptr->aqueue.count;
1495 irq_ptr->ccw.cda = 0;
1496
1497 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1498 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
1499
1500 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1501 0, DOIO_DENY_PREFETCH);
1502 if (rc) {
1503 sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no);
1504 QDIO_DBF_TEXT2(1, setup, dbf_text);
1505 sprintf(dbf_text, "aq:rc%4x", rc);
1506 QDIO_DBF_TEXT2(1, setup, dbf_text);
1507 }
1508 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1509
1510 if (rc)
1511 goto out;
1512
1513 if (is_thinint_irq(irq_ptr))
1514 tiqdio_add_input_queues(irq_ptr);
1515
1516 /* wait for subchannel to become active */
1517 msleep(5);
1518
1519 switch (irq_ptr->state) {
1520 case QDIO_IRQ_STATE_STOPPED:
1521 case QDIO_IRQ_STATE_ERR:
1522 mutex_unlock(&irq_ptr->setup_mutex);
1523 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1524 return -EIO;
1525 default:
1526 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1527 rc = 0;
1528 }
1529out:
1530 mutex_unlock(&irq_ptr->setup_mutex);
1531 return rc;
1532}
1533EXPORT_SYMBOL_GPL(qdio_activate);
1534
1535static inline int buf_in_between(int bufnr, int start, int count)
1536{
1537 int end = add_buf(start, count);
1538
1539 if (end > start) {
1540 if (bufnr >= start && bufnr < end)
1541 return 1;
1542 else
1543 return 0;
1544 }
1545
1546 /* wrap-around case */
1547 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1548 (bufnr < end))
1549 return 1;
1550 else
1551 return 0;
1552}
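
/*
 * Worked example: with start = 120 and count = 16 the range wraps and
 * end = add_buf(120, 16) = 8, so buffers 120..127 and 0..7 are "in
 * between": bufnr 2 returns 1, bufnr 50 returns 0.
 */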
1553
1554/**
1555 * handle_inbound - reset processed input buffers
1556 * @q: queue containing the buffers
1557 * @callflags: flags
1558 * @bufnr: first buffer to process
1559 * @count: how many buffers are emptied
1560 */
1561static void handle_inbound(struct qdio_q *q, unsigned int callflags,
1562 int bufnr, int count)
1563{
1564 unsigned long flags;
1565 int used, rc;
1566
1567 /*
1568 * do_QDIO could run in parallel with the queue tasklet so the
1569	 * upper-layer program could empty the ACK'ed buffer here.
1570 * If that happens we must clear the polling flag, otherwise
1571 * qdio_stop_polling() could set the buffer to NOT_INIT after
1572 * it was set to EMPTY which would kill us.
1573 */
1574 spin_lock_irqsave(&q->u.in.lock, flags);
1575 if (q->u.in.polling)
1576 if (buf_in_between(q->last_move_ftc, bufnr, count))
1577 q->u.in.polling = 0;
1578
1579 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1580 spin_unlock_irqrestore(&q->u.in.lock, flags);
1581
1582 used = atomic_add_return(count, &q->nr_buf_used) - count;
1583 BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
1584
1585 /* no need to signal as long as the adapter had free buffers */
1586 if (used)
1587 return;
1588
1589 if (need_siga_in(q)) {
1590 rc = qdio_siga_input(q);
1591 if (rc)
1592 q->qdio_error = rc;
1593 }
1594}
1595
1596/**
1597 * handle_outbound - process filled outbound buffers
1598 * @q: queue containing the buffers
1599 * @callflags: flags
1600 * @bufnr: first buffer to process
1601 * @count: how many buffers are filled
1602 */
1603static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1604 int bufnr, int count)
1605{
1606 unsigned char state;
1607 int used;
1608
1609 qdio_perf_stat_inc(&perf_stats.outbound_handler);
1610
1611 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1612 used = atomic_add_return(count, &q->nr_buf_used);
1613 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1614
1615 if (callflags & QDIO_FLAG_PCI_OUT)
1616 q->u.out.pci_out_enabled = 1;
1617 else
1618 q->u.out.pci_out_enabled = 0;
1619
1620 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1621 if (multicast_outbound(q))
1622 qdio_kick_outbound_q(q);
1623 else
1624 /*
1625 * One siga-w per buffer required for unicast
1626 * HiperSockets.
1627 */
1628 while (count--)
1629 qdio_kick_outbound_q(q);
1630 goto out;
1631 }
1632
1633 if (need_siga_sync(q)) {
1634 qdio_siga_sync_q(q);
1635 goto out;
1636 }
1637
1638 /* try to fast requeue buffers */
1639 get_buf_state(q, prev_buf(bufnr), &state);
1640 if (state != SLSB_CU_OUTPUT_PRIMED)
1641 qdio_kick_outbound_q(q);
1642 else {
1643 QDIO_DBF_TEXT5(0, trace, "fast-req");
1644 qdio_perf_stat_inc(&perf_stats.fast_requeue);
1645 }
1646out:
1647 /* Fixme: could wait forever if called from process context */
1648 tasklet_schedule(&q->tasklet);
1649}
1650
1651/**
1652 * do_QDIO - process input or output buffers
1653 * @cdev: associated ccw_device for the qdio subchannel
1654 * @callflags: input or output and special flags from the program
1655 * @q_nr: queue number
1656 * @bufnr: buffer number
1657 * @count: how many buffers to process
1658 */
1659int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1660 int q_nr, int bufnr, int count)
1661{
1662 struct qdio_irq *irq_ptr;
1663#ifdef CONFIG_QDIO_DEBUG
1664 char dbf_text[20];
1665
1666 sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no);
1667 QDIO_DBF_TEXT3(0, trace, dbf_text);
1668#endif /* CONFIG_QDIO_DEBUG */
1669
1670	if ((bufnr >= QDIO_MAX_BUFFERS_PER_Q) ||
1671	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
1672	    (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
1673 return -EINVAL;
1674
1675 if (!count)
1676 return 0;
1677
1678 irq_ptr = cdev->private->qdio_data;
1679 if (!irq_ptr)
1680 return -ENODEV;
1681
1682#ifdef CONFIG_QDIO_DEBUG
1683 if (callflags & QDIO_FLAG_SYNC_INPUT)
1684 QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr],
1685 sizeof(void *));
1686 else
1687 QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr],
1688 sizeof(void *));
1689
1690 sprintf(dbf_text, "flag%04x", callflags);
1691 QDIO_DBF_TEXT3(0, trace, dbf_text);
1692 sprintf(dbf_text, "qi%02xct%02x", bufnr, count);
1693 QDIO_DBF_TEXT3(0, trace, dbf_text);
1694#endif /* CONFIG_QDIO_DEBUG */
1695
1696 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1697 return -EBUSY;
1698
1699 if (callflags & QDIO_FLAG_SYNC_INPUT)
1700 handle_inbound(irq_ptr->input_qs[q_nr],
1701 callflags, bufnr, count);
1702 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1703 handle_outbound(irq_ptr->output_qs[q_nr],
1704 callflags, bufnr, count);
1705 else {
1706 QDIO_DBF_TEXT3(1, trace, "doQD:inv");
1707 return -EINVAL;
1708 }
1709 return 0;
1710}
1711EXPORT_SYMBOL_GPL(do_QDIO);
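
/*
 * A minimal usage sketch (illustration only): once the upper layer has
 * emptied an inbound buffer it hands it back on input queue 0; the
 * callflags select the direction, as checked above.
 */
static inline int requeue_inbound_sketch(struct ccw_device *cdev, int bufnr)
{
	return do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
}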
1712
1713static int __init init_QDIO(void)
1714{
1715 int rc;
1716
1717 rc = qdio_setup_init();
1718 if (rc)
1719 return rc;
1720 rc = tiqdio_allocate_memory();
1721 if (rc)
1722 goto out_cache;
1723 rc = qdio_debug_init();
1724 if (rc)
1725 goto out_ti;
1726 rc = qdio_setup_perf_stats();
1727 if (rc)
1728 goto out_debug;
1729 rc = tiqdio_register_thinints();
1730 if (rc)
1731 goto out_perf;
1732 return 0;
1733
1734out_perf:
1735 qdio_remove_perf_stats();
1736out_debug:
1737 qdio_debug_exit();
1738out_ti:
1739 tiqdio_free_memory();
1740out_cache:
1741 qdio_setup_exit();
1742 return rc;
1743}
1744
1745static void __exit exit_QDIO(void)
1746{
1747 tiqdio_unregister_thinints();
1748 tiqdio_free_memory();
1749 qdio_remove_perf_stats();
1750 qdio_debug_exit();
1751 qdio_setup_exit();
1752}
1753
1754module_init(init_QDIO);
1755module_exit(exit_QDIO);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
new file mode 100644
index 000000000000..ea01b85b1cc9
--- /dev/null
+++ b/drivers/s390/cio/qdio_perf.c
@@ -0,0 +1,151 @@
1/*
2 * drivers/s390/cio/qdio_perf.c
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#include <linux/kernel.h>
9#include <linux/proc_fs.h>
10#include <linux/seq_file.h>
11#include <asm/ccwdev.h>
12
13#include "cio.h"
14#include "css.h"
15#include "device.h"
16#include "ioasm.h"
17#include "chsc.h"
18#include "qdio_debug.h"
19#include "qdio_perf.h"
20
21int qdio_performance_stats;
22struct qdio_perf_stats perf_stats;
23
24#ifdef CONFIG_PROC_FS
25static struct proc_dir_entry *qdio_perf_pde;
26#endif
27
28inline void qdio_perf_stat_inc(atomic_long_t *count)
29{
30 if (qdio_performance_stats)
31 atomic_long_inc(count);
32}
33
34inline void qdio_perf_stat_dec(atomic_long_t *count)
35{
36 if (qdio_performance_stats)
37 atomic_long_dec(count);
38}
39
40/*
41 * procfs functions
42 */
43static int qdio_perf_proc_show(struct seq_file *m, void *v)
44{
45 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
46 (long)atomic_long_read(&perf_stats.qdio_int));
47 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
48 (long)atomic_long_read(&perf_stats.pci_int));
49 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
50 (long)atomic_long_read(&perf_stats.thin_int));
51 seq_printf(m, "\n");
52 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
53 (long)atomic_long_read(&perf_stats.tasklet_inbound));
54 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
55 (long)atomic_long_read(&perf_stats.tasklet_outbound));
56 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
57 (long)atomic_long_read(&perf_stats.tasklet_thinint),
58 (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
59 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
60 (long)atomic_long_read(&perf_stats.thinint_inbound),
61 (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
62 seq_printf(m, "\n");
63 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
64 (long)atomic_long_read(&perf_stats.siga_in));
65 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
66 (long)atomic_long_read(&perf_stats.siga_out));
67 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
68 (long)atomic_long_read(&perf_stats.siga_sync));
69 seq_printf(m, "\n");
70 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
71 (long)atomic_long_read(&perf_stats.inbound_handler));
72 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
73 (long)atomic_long_read(&perf_stats.outbound_handler));
74 seq_printf(m, "\n");
75 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
76 (long)atomic_long_read(&perf_stats.fast_requeue));
77 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
78 (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
79 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
80 (long)atomic_long_read(&perf_stats.debug_stop_polling));
81 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
82 (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
83 seq_printf(m, "\n");
84 return 0;
85}
86static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
87{
88 return single_open(filp, qdio_perf_proc_show, NULL);
89}
90
91static struct file_operations qdio_perf_proc_fops = {
92 .owner = THIS_MODULE,
93 .open = qdio_perf_seq_open,
94 .read = seq_read,
95 .llseek = seq_lseek,
96 .release = single_release,
97};
98
99/*
100 * sysfs functions
101 */
102static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
103{
104 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
105}
106
107static ssize_t qdio_perf_stats_store(struct bus_type *bus,
108 const char *buf, size_t count)
109{
110 unsigned long i;
111
112 if (strict_strtoul(buf, 16, &i) != 0)
113 return -EINVAL;
114 if ((i != 0) && (i != 1))
115 return -EINVAL;
116 if (i == qdio_performance_stats)
117 return count;
118
119 qdio_performance_stats = i;
120 /* reset performance statistics */
121 if (i == 0)
122 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
123 return count;
124}
125
126static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
127 qdio_perf_stats_store);
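
This attribute is registered on the ccw bus in qdio_setup_perf_stats() below,
so it appears as /sys/bus/ccw/qdio_performance_stats, and the seq_file above
is exposed as /proc/qdio_perf. A user-space sketch (paths inferred from the
registration calls in this file) that switches the counters on and dumps the
summary:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/ccw/qdio_performance_stats", "w");
	char line[256];

	if (!f)
		return 1;
	fputs("1\n", f);	/* writing "0" disables and resets the counters */
	fclose(f);

	f = fopen("/proc/qdio_perf", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}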
128
129int __init qdio_setup_perf_stats(void)
130{
131 int rc;
132
133 rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
134 if (rc)
135 return rc;
136
137#ifdef CONFIG_PROC_FS
138 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
139 qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
140 NULL, &qdio_perf_proc_fops);
141#endif
142 return 0;
143}
144
145void __exit qdio_remove_perf_stats(void)
146{
147#ifdef CONFIG_PROC_FS
148 remove_proc_entry("qdio_perf", NULL);
149#endif
150 bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
151}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
new file mode 100644
index 000000000000..5c406a8b7387
--- /dev/null
+++ b/drivers/s390/cio/qdio_perf.h
@@ -0,0 +1,54 @@
1/*
2 * drivers/s390/cio/qdio_perf.h
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#ifndef QDIO_PERF_H
9#define QDIO_PERF_H
10
11#include <linux/types.h>
12#include <linux/device.h>
13#include <asm/atomic.h>
14
15struct qdio_perf_stats {
16 /* interrupt handler calls */
17 atomic_long_t qdio_int;
18 atomic_long_t pci_int;
19 atomic_long_t thin_int;
20
21 /* tasklet runs */
22 atomic_long_t tasklet_inbound;
23 atomic_long_t tasklet_outbound;
24 atomic_long_t tasklet_thinint;
25 atomic_long_t tasklet_thinint_loop;
26 atomic_long_t thinint_inbound;
27 atomic_long_t thinint_inbound_loop;
28 atomic_long_t thinint_inbound_loop2;
29
30 /* signal adapter calls */
31 atomic_long_t siga_out;
32 atomic_long_t siga_in;
33 atomic_long_t siga_sync;
34
35 /* misc */
36 atomic_long_t inbound_handler;
37 atomic_long_t outbound_handler;
38 atomic_long_t fast_requeue;
39
40 /* for debugging */
41 atomic_long_t debug_tl_out_timer;
42 atomic_long_t debug_stop_polling;
43};
44
45extern struct qdio_perf_stats perf_stats;
46extern int qdio_performance_stats;
47
48int qdio_setup_perf_stats(void);
49void qdio_remove_perf_stats(void);
50
51extern void qdio_perf_stat_inc(atomic_long_t *count);
52extern void qdio_perf_stat_dec(atomic_long_t *count);
53
54#endif
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
new file mode 100644
index 000000000000..f0923a8aceda
--- /dev/null
+++ b/drivers/s390/cio/qdio_setup.c
@@ -0,0 +1,521 @@
1/*
 2 * drivers/s390/cio/qdio_setup.c
3 *
4 * qdio queue initialization
5 *
6 * Copyright (C) IBM Corp. 2008
7 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
8 */
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <asm/qdio.h>
12
13#include "cio.h"
14#include "css.h"
15#include "device.h"
16#include "ioasm.h"
17#include "chsc.h"
18#include "qdio.h"
19#include "qdio_debug.h"
20
21static struct kmem_cache *qdio_q_cache;
22
23/*
24 * QEBSM is only available on 64-bit systems, but the adapter sets the
25 * feature flag regardless, so we override it here.
26 */
27static inline int qebsm_possible(void)
28{
29#ifdef CONFIG_64BIT
30 return css_general_characteristics.qebsm;
31#endif
32 return 0;
33}
34
35/*
36 * qib_param_field: pointer to 128 bytes or NULL, if no param field
37 * input/output_slib_elements: pointer to nr_queues * 128 words of data or NULL
38 */
39static void set_impl_params(struct qdio_irq *irq_ptr,
40 unsigned int qib_param_field_format,
41 unsigned char *qib_param_field,
42 unsigned long *input_slib_elements,
43 unsigned long *output_slib_elements)
44{
45 struct qdio_q *q;
46 int i, j;
47
48 if (!irq_ptr)
49 return;
50
51 WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
52 irq_ptr->qib.pfmt = qib_param_field_format;
53 if (qib_param_field)
54 memcpy(irq_ptr->qib.parm, qib_param_field,
55 QDIO_MAX_BUFFERS_PER_Q);
56
57 if (!input_slib_elements)
58 goto output;
59
60 for_each_input_queue(irq_ptr, q, i) {
61 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
62 q->slib->slibe[j].parms =
63 input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
64 }
65output:
66 if (!output_slib_elements)
67 return;
68
69 for_each_output_queue(irq_ptr, q, i) {
70 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
71 q->slib->slibe[j].parms =
72 output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
73 }
74}
75
76static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
77{
78 struct qdio_q *q;
79 int i;
80
81 for (i = 0; i < nr_queues; i++) {
82 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
83 if (!q)
84 return -ENOMEM;
85 WARN_ON((unsigned long)q & 0xff);
86
87 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
88 if (!q->slib) {
89 kmem_cache_free(qdio_q_cache, q);
90 return -ENOMEM;
91 }
92 WARN_ON((unsigned long)q->slib & 0x7ff);
93 irq_ptr_qs[i] = q;
94 }
95 return 0;
96}
97
98int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
99{
100 int rc;
101
102 rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
103 if (rc)
104 return rc;
105 rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
106 return rc;
107}
108
109static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
110 qdio_handler_t *handler, int i)
111{
112 /* must be cleared by every qdio_establish */
113 memset(q, 0, ((char *)&q->slib) - ((char *)q));
114 memset(q->slib, 0, PAGE_SIZE);
115
116 q->irq_ptr = irq_ptr;
117 q->mask = 1 << (31 - i);
118 q->nr = i;
119 q->handler = handler;
120}
121
122static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
123 void **sbals_array, char *dbf_text, int i)
124{
125 struct qdio_q *prev;
126 int j;
127
128 QDIO_DBF_TEXT0(0, setup, dbf_text);
129 QDIO_DBF_HEX0(0, setup, &q, sizeof(void *));
130
131 q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
132
133 /* fill in sbal */
134 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
135 q->sbal[j] = *sbals_array++;
136 WARN_ON((unsigned long)q->sbal[j] & 0xff);
137 }
138
139 /* fill in slib */
140 if (i > 0) {
141 prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
142 : irq_ptr->output_qs[i - 1];
143 prev->slib->nsliba = (unsigned long)q->slib;
144 }
145
146 q->slib->sla = (unsigned long)q->sl;
147 q->slib->slsba = (unsigned long)&q->slsb.val[0];
148
149 /* fill in sl */
150 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
151 q->sl->element[j].sbal = (unsigned long)q->sbal[j];
152
153 QDIO_DBF_TEXT2(0, setup, "sl-sb-b0");
154 QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *));
155 QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *));
156 QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *));
157}
158
159static void setup_queues(struct qdio_irq *irq_ptr,
160 struct qdio_initialize *qdio_init)
161{
162 char dbf_text[20];
163 struct qdio_q *q;
164 void **input_sbal_array = qdio_init->input_sbal_addr_array;
165 void **output_sbal_array = qdio_init->output_sbal_addr_array;
166 int i;
167
168 sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no);
169 QDIO_DBF_TEXT0(0, setup, dbf_text);
170
171 for_each_input_queue(irq_ptr, q, i) {
172 sprintf(dbf_text, "in-q%4x", i);
173 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
174
175 q->is_input_q = 1;
176 spin_lock_init(&q->u.in.lock);
177 setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i);
178 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
179
180 if (is_thinint_irq(irq_ptr))
181 tasklet_init(&q->tasklet, tiqdio_inbound_processing,
182 (unsigned long) q);
183 else
184 tasklet_init(&q->tasklet, qdio_inbound_processing,
185 (unsigned long) q);
186 }
187
188 for_each_output_queue(irq_ptr, q, i) {
189 sprintf(dbf_text, "outq%4x", i);
190 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
191
192 q->is_input_q = 0;
193 setup_storage_lists(q, irq_ptr, output_sbal_array,
194 dbf_text, i);
195 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
196
197 tasklet_init(&q->tasklet, qdio_outbound_processing,
198 (unsigned long) q);
199 setup_timer(&q->u.out.timer, (void(*)(unsigned long))
200 &qdio_outbound_timer, (unsigned long)q);
201 }
202}
203
204static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
205{
206 if (qdioac & AC1_SIGA_INPUT_NEEDED)
207 irq_ptr->siga_flag.input = 1;
208 if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
209 irq_ptr->siga_flag.output = 1;
210 if (qdioac & AC1_SIGA_SYNC_NEEDED)
211 irq_ptr->siga_flag.sync = 1;
212 if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
213 irq_ptr->siga_flag.no_sync_ti = 1;
214 if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
215 irq_ptr->siga_flag.no_sync_out_pci = 1;
216
217 if (irq_ptr->siga_flag.no_sync_out_pci &&
218 irq_ptr->siga_flag.no_sync_ti)
219 irq_ptr->siga_flag.no_sync_out_ti = 1;
220}
221
222static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
223 unsigned char qdioac, unsigned long token)
224{
225 char dbf_text[15];
226
227 if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
228 goto no_qebsm;
229 if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
230 (!(qdioac & AC1_SC_QEBSM_ENABLED)))
231 goto no_qebsm;
232
233 irq_ptr->sch_token = token;
234
235 QDIO_DBF_TEXT0(0, setup, "V=V:1");
236 sprintf(dbf_text, "%8lx", irq_ptr->sch_token);
237 QDIO_DBF_TEXT0(0, setup, dbf_text);
238 return;
239
240no_qebsm:
241 irq_ptr->sch_token = 0;
242 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
243 QDIO_DBF_TEXT0(0, setup, "noV=V");
244}
245
246static int __get_ssqd_info(struct qdio_irq *irq_ptr)
247{
248 struct chsc_ssqd_area *ssqd;
249 int rc;
250
251 QDIO_DBF_TEXT0(0, setup, "getssqd");
252 ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
253 memset(ssqd, 0, PAGE_SIZE);
254
255 ssqd->request = (struct chsc_header) {
256 .length = 0x0010,
257 .code = 0x0024,
258 };
259 ssqd->first_sch = irq_ptr->schid.sch_no;
260 ssqd->last_sch = irq_ptr->schid.sch_no;
261 ssqd->ssid = irq_ptr->schid.ssid;
262
263 if (chsc(ssqd))
264 return -EIO;
265 rc = chsc_error_from_response(ssqd->response.code);
266 if (rc)
267 return rc;
268
269 if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
270 !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
271 (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no))
272 return -EINVAL;
273
274 memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
275 sizeof(struct qdio_ssqd_desc));
276 return 0;
277}
278
279void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
280{
281 unsigned char qdioac;
282 char dbf_text[15];
283 int rc;
284
285 rc = __get_ssqd_info(irq_ptr);
286 if (rc) {
287 QDIO_DBF_TEXT2(0, setup, "ssqdasig");
288 sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no);
289 QDIO_DBF_TEXT2(0, setup, dbf_text);
290 sprintf(dbf_text, "rc:%d", rc);
291 QDIO_DBF_TEXT2(0, setup, dbf_text);
292 /* all flags set, worst case */
293 qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
294 AC1_SIGA_SYNC_NEEDED;
295 } else
296 qdioac = irq_ptr->ssqd_desc.qdioac1;
297
298 check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
299 process_ac_flags(irq_ptr, qdioac);
300
301 sprintf(dbf_text, "qdioac%2x", qdioac);
302 QDIO_DBF_TEXT2(0, setup, dbf_text);
303}
304
305void qdio_release_memory(struct qdio_irq *irq_ptr)
306{
307 struct qdio_q *q;
308 int i;
309
310 /*
 311	 * Must check queue array manually since irq_ptr->nr_input_qs /
 312	 * irq_ptr->nr_output_qs may not yet be set.
313 */
314 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
315 q = irq_ptr->input_qs[i];
316 if (q) {
317 free_page((unsigned long) q->slib);
318 kmem_cache_free(qdio_q_cache, q);
319 }
320 }
321 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
322 q = irq_ptr->output_qs[i];
323 if (q) {
324 free_page((unsigned long) q->slib);
325 kmem_cache_free(qdio_q_cache, q);
326 }
327 }
328 kfree(irq_ptr->qdr);
329 free_page(irq_ptr->chsc_page);
330 free_page((unsigned long) irq_ptr);
331}
332
333static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
334 struct qdio_q **irq_ptr_qs,
335 int i, int nr)
336{
337 irq_ptr->qdr->qdf0[i + nr].sliba =
338 (unsigned long)irq_ptr_qs[i]->slib;
339
340 irq_ptr->qdr->qdf0[i + nr].sla =
341 (unsigned long)irq_ptr_qs[i]->sl;
342
343 irq_ptr->qdr->qdf0[i + nr].slsba =
344 (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
345
346 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
347 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
348 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
349 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
350}
351
352static void setup_qdr(struct qdio_irq *irq_ptr,
353 struct qdio_initialize *qdio_init)
354{
355 int i;
356
357 irq_ptr->qdr->qfmt = qdio_init->q_format;
358 irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
359 irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
360 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
361 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
362 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
363 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
364
365 for (i = 0; i < qdio_init->no_input_qs; i++)
366 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
367
368 for (i = 0; i < qdio_init->no_output_qs; i++)
369 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
370 qdio_init->no_input_qs);
371}
372
373static void setup_qib(struct qdio_irq *irq_ptr,
374 struct qdio_initialize *init_data)
375{
376 if (qebsm_possible())
377 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
378
379 irq_ptr->qib.qfmt = init_data->q_format;
380 if (init_data->no_input_qs)
381 irq_ptr->qib.isliba =
382 (unsigned long)(irq_ptr->input_qs[0]->slib);
383 if (init_data->no_output_qs)
384 irq_ptr->qib.osliba =
385 (unsigned long)(irq_ptr->output_qs[0]->slib);
386 memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
387}
388
389int qdio_setup_irq(struct qdio_initialize *init_data)
390{
391 struct ciw *ciw;
392 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
393 int rc;
394
395 memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
396 /* wipes qib.ac, required by ar7063 */
397 memset(irq_ptr->qdr, 0, sizeof(struct qdr));
398
399 irq_ptr->int_parm = init_data->int_parm;
400 irq_ptr->nr_input_qs = init_data->no_input_qs;
401 irq_ptr->nr_output_qs = init_data->no_output_qs;
402
403 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
404 irq_ptr->cdev = init_data->cdev;
405 setup_queues(irq_ptr, init_data);
406
407 setup_qib(irq_ptr, init_data);
408 qdio_setup_thinint(irq_ptr);
409 set_impl_params(irq_ptr, init_data->qib_param_field_format,
410 init_data->qib_param_field,
411 init_data->input_slib_elements,
412 init_data->output_slib_elements);
413
414 /* fill input and output descriptors */
415 setup_qdr(irq_ptr, init_data);
416
417 /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
418
419 /* get qdio commands */
420 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
421 if (!ciw) {
422 QDIO_DBF_TEXT2(1, setup, "no eq");
423 rc = -EINVAL;
424 goto out_err;
425 }
426 irq_ptr->equeue = *ciw;
427
428 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
429 if (!ciw) {
430 QDIO_DBF_TEXT2(1, setup, "no aq");
431 rc = -EINVAL;
432 goto out_err;
433 }
434 irq_ptr->aqueue = *ciw;
435
436 /* set new interrupt handler */
437 irq_ptr->orig_handler = init_data->cdev->handler;
438 init_data->cdev->handler = qdio_int_handler;
439 return 0;
440out_err:
441 qdio_release_memory(irq_ptr);
442 return rc;
443}
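
For context, a sketch of the qdio_initialize block a driver fills in before
this function runs. The field names match what is consumed above; the
example_* symbols are placeholders, and .cdev plus .adapter_name would be
filled in at probe time:

#include <asm/qdio.h>

extern qdio_handler_t example_input_handler;
extern qdio_handler_t example_output_handler;
extern void *example_in_sbals[QDIO_MAX_BUFFERS_PER_Q];
extern void *example_out_sbals[QDIO_MAX_BUFFERS_PER_Q];

static struct qdio_initialize example_init_data = {
	.q_format		= QDIO_QETH_QFMT,
	.no_input_qs		= 1,
	.no_output_qs		= 1,
	.input_handler		= example_input_handler,
	.output_handler		= example_output_handler,
	.int_parm		= 0,
	.input_sbal_addr_array	= example_in_sbals,
	.output_sbal_addr_array	= example_out_sbals,
};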
444
445void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
446 struct ccw_device *cdev)
447{
448 char s[80];
449
450 sprintf(s, "%s ", cdev->dev.bus_id);
451
452 switch (irq_ptr->qib.qfmt) {
453 case QDIO_QETH_QFMT:
454 sprintf(s + strlen(s), "OSADE ");
455 break;
456 case QDIO_ZFCP_QFMT:
457 sprintf(s + strlen(s), "ZFCP ");
458 break;
459 case QDIO_IQDIO_QFMT:
460 sprintf(s + strlen(s), "HiperSockets ");
461 break;
462 }
463 sprintf(s + strlen(s), "using: ");
464
465 if (!is_thinint_irq(irq_ptr))
466 sprintf(s + strlen(s), "no");
467 sprintf(s + strlen(s), "AdapterInterrupts ");
 468	if (!irq_ptr->sch_token)
469 sprintf(s + strlen(s), "no");
470 sprintf(s + strlen(s), "QEBSM ");
471 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
472 sprintf(s + strlen(s), "no");
473 sprintf(s + strlen(s), "OutboundPCI ");
474 if (!css_general_characteristics.aif_tdd)
475 sprintf(s + strlen(s), "no");
476 sprintf(s + strlen(s), "TDD\n");
477 printk(KERN_INFO "qdio: %s", s);
478
479 memset(s, 0, sizeof(s));
480 sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
481 if (irq_ptr->siga_flag.input)
482 sprintf(s + strlen(s), "Read ");
483 if (irq_ptr->siga_flag.output)
484 sprintf(s + strlen(s), "Write ");
485 if (irq_ptr->siga_flag.sync)
486 sprintf(s + strlen(s), "Sync ");
487 if (!irq_ptr->siga_flag.no_sync_ti)
488 sprintf(s + strlen(s), "SyncAI ");
489 if (!irq_ptr->siga_flag.no_sync_out_ti)
490 sprintf(s + strlen(s), "SyncOutAI ");
491 if (!irq_ptr->siga_flag.no_sync_out_pci)
492 sprintf(s + strlen(s), "SyncOutPCI");
493 sprintf(s + strlen(s), "\n");
494 printk(KERN_INFO "qdio: %s", s);
495}
496
497int __init qdio_setup_init(void)
498{
499 char dbf_text[15];
500
501 qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
502 256, 0, NULL);
503 if (!qdio_q_cache)
504 return -ENOMEM;
505
506 /* Check for OSA/FCP thin interrupts (bit 67). */
507 sprintf(dbf_text, "thini%1x",
508 (css_general_characteristics.aif_osa) ? 1 : 0);
509 QDIO_DBF_TEXT0(0, setup, dbf_text);
510
511 /* Check for QEBSM support in general (bit 58). */
512 sprintf(dbf_text, "cssQBS:%1x",
513 (qebsm_possible()) ? 1 : 0);
514 QDIO_DBF_TEXT0(0, setup, dbf_text);
515 return 0;
516}
517
518void __exit qdio_setup_exit(void)
519{
520 kmem_cache_destroy(qdio_q_cache);
521}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 000000000000..9291a771d812
--- /dev/null
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,380 @@
1/*
 2 * linux/drivers/s390/cio/qdio_thinint.c
3 *
4 * thin interrupt support for qdio
5 *
6 * Copyright 2000-2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 * Jan Glauber <jang@linux.vnet.ibm.com>
10 */
11#include <linux/io.h>
12#include <asm/atomic.h>
13#include <asm/debug.h>
14#include <asm/qdio.h>
15#include <asm/airq.h>
16#include <asm/isc.h>
17
18#include "cio.h"
19#include "ioasm.h"
20#include "qdio.h"
21#include "qdio_debug.h"
22#include "qdio_perf.h"
23
24/*
25 * Restriction: only the first 63 iqdio subchannels get their own
26 * indicator; all subsequent subchannels share one indicator.
27 */
28#define TIQDIO_NR_NONSHARED_IND 63
29#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
30#define TIQDIO_SHARED_IND 63
31
32/* list of thin interrupt input queues */
33static LIST_HEAD(tiq_list);
34
35/* adapter local summary indicator */
36static unsigned char *tiqdio_alsi;
37
38/* device state change indicators */
39struct indicator_t {
40 u32 ind; /* u32 because of compare-and-swap performance */
41 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
42};
43static struct indicator_t *q_indicators;
44
45static void tiqdio_tasklet_fn(unsigned long data);
46static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
47
48static int css_qdio_omit_svs;
49
50static inline unsigned long do_clear_global_summary(void)
51{
52 register unsigned long __fn asm("1") = 3;
53 register unsigned long __tmp asm("2");
54 register unsigned long __time asm("3");
55
56 asm volatile(
57 " .insn rre,0xb2650000,2,0"
58 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
59 return __time;
60}
61
62/* returns addr for the device state change indicator */
63static u32 *get_indicator(void)
64{
65 int i;
66
67 for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
68 if (!atomic_read(&q_indicators[i].count)) {
69 atomic_set(&q_indicators[i].count, 1);
70 return &q_indicators[i].ind;
71 }
72
73 /* use the shared indicator */
74 atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
75 return &q_indicators[TIQDIO_SHARED_IND].ind;
76}
77
78static void put_indicator(u32 *addr)
79{
80 int i;
81
82 if (!addr)
83 return;
84 i = ((unsigned long)addr - (unsigned long)q_indicators) /
85 sizeof(struct indicator_t);
86 atomic_dec(&q_indicators[i].count);
87}
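
Taken together, get_indicator()/put_indicator() act as a small reference
counted allocator. A usage sketch (illustration only; the real callers are
qdio_setup_thinint() and qdio_shutdown_thinint() further down):

static void example_indicator_lifetime(struct qdio_irq *irq_ptr)
{
	/* the 64th and all later concurrent users receive the shared
	 * slot; only its use count grows */
	irq_ptr->dsci = get_indicator();

	/* ... point the adapter at the indicator and run I/O ... */

	put_indicator(irq_ptr->dsci);	/* drops the use count again */
}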
88
89void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
90{
91 struct qdio_q *q;
92 int i;
93
94 /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
95 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
96 css_qdio_omit_svs = 1;
97
98 for_each_input_queue(irq_ptr, q, i) {
99 list_add_rcu(&q->entry, &tiq_list);
100 synchronize_rcu();
101 }
102 xchg(irq_ptr->dsci, 1);
103 tasklet_schedule(&tiqdio_tasklet);
104}
105
106/*
107 * we cannot stop the tiqdio tasklet here since it is for all
108 * thinint qdio devices and it must run as long as there is a
109 * thinint device left
110 */
111void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
112{
113 struct qdio_q *q;
114 int i;
115
116 for_each_input_queue(irq_ptr, q, i) {
117 list_del_rcu(&q->entry);
118 synchronize_rcu();
119 }
120}
121
122static inline int tiqdio_inbound_q_done(struct qdio_q *q)
123{
124 unsigned char state;
125
126 if (!atomic_read(&q->nr_buf_used))
127 return 1;
128
129 qdio_siga_sync_q(q);
130 get_buf_state(q, q->first_to_check, &state);
131
132 if (state == SLSB_P_INPUT_PRIMED)
133 /* more work coming */
134 return 0;
135 return 1;
136}
137
138static inline int shared_ind(struct qdio_irq *irq_ptr)
139{
140 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
141}
142
143static void __tiqdio_inbound_processing(struct qdio_q *q)
144{
145 qdio_perf_stat_inc(&perf_stats.thinint_inbound);
146 qdio_sync_after_thinint(q);
147
148 /*
149 * Maybe we have work on our outbound queues... at least
150 * we have to check the PCI capable queues.
151 */
152 qdio_check_outbound_after_thinint(q);
153
154again:
155 if (!qdio_inbound_q_moved(q))
156 return;
157
158 qdio_kick_inbound_handler(q);
159
160 if (!tiqdio_inbound_q_done(q)) {
161 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
162 goto again;
163 }
164
165 qdio_stop_polling(q);
166 /*
167 * We need to check again to not lose initiative after
168 * resetting the ACK state.
169 */
170 if (!tiqdio_inbound_q_done(q)) {
171 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
172 goto again;
173 }
174}
175
176void tiqdio_inbound_processing(unsigned long data)
177{
178 struct qdio_q *q = (struct qdio_q *)data;
179
180 __tiqdio_inbound_processing(q);
181}
182
183/* check for work on all inbound thinint queues */
184static void tiqdio_tasklet_fn(unsigned long data)
185{
186 struct qdio_q *q;
187
188 qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
189again:
190
191 /* protect tiq_list entries, only changed in activate or shutdown */
192 rcu_read_lock();
193
194 list_for_each_entry_rcu(q, &tiq_list, entry)
195 /* only process queues from changed sets */
196 if (*q->irq_ptr->dsci) {
197
198 /* only clear it if the indicator is non-shared */
199 if (!shared_ind(q->irq_ptr))
200 xchg(q->irq_ptr->dsci, 0);
201 /*
202 * don't call inbound processing directly since
203 * that could starve other thinint queues
204 */
205 tasklet_schedule(&q->tasklet);
206 }
207
208 rcu_read_unlock();
209
210 /*
211 * if we used the shared indicator clear it now after all queues
212 * were processed
213 */
214 if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
215 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
216
217 /* prevent racing */
218 if (*tiqdio_alsi)
219 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
220 }
221
222 /* check for more work */
223 if (*tiqdio_alsi) {
224 xchg(tiqdio_alsi, 0);
225 qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
226 goto again;
227 }
228}
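
The clear-then-recheck sequence above is the usual defence against losing an
indication that is raised while it is being consumed. The same pattern in
isolation (illustration only, not patch code):

static void example_summary_loop(unsigned long *summary)
{
	do {
		xchg(summary, 0);	/* consume the indication first */
		/* ... scan all queues and schedule their tasklets ... */
	} while (*summary);		/* re-raised during the scan? */
}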
229
230/**
231 * tiqdio_thinint_handler - thin interrupt handler for qdio
232 * @ind: pointer to adapter local summary indicator
233 * @drv_data: NULL
234 */
235static void tiqdio_thinint_handler(void *ind, void *drv_data)
236{
237 qdio_perf_stat_inc(&perf_stats.thin_int);
238
239 /*
240 * SVS only when needed: issue SVS to benefit from iqdio interrupt
241 * avoidance (SVS clears adapter interrupt suppression overwrite)
242 */
243 if (!css_qdio_omit_svs)
244 do_clear_global_summary();
245
246 /*
247 * reset local summary indicator (tiqdio_alsi) to stop adapter
248 * interrupts for now, the tasklet will clean all dsci's
249 */
250 xchg((u8 *)ind, 0);
251 tasklet_hi_schedule(&tiqdio_tasklet);
252}
253
254static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
255{
256 struct scssc_area *scssc_area;
257 char dbf_text[15];
258 void *ptr;
259 int rc;
260
261 scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
262 memset(scssc_area, 0, PAGE_SIZE);
263
264 if (reset) {
265 scssc_area->summary_indicator_addr = 0;
266 scssc_area->subchannel_indicator_addr = 0;
267 } else {
268 scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
269 scssc_area->subchannel_indicator_addr =
270 virt_to_phys(irq_ptr->dsci);
271 }
272
273 scssc_area->request = (struct chsc_header) {
274 .length = 0x0fe0,
275 .code = 0x0021,
276 };
277 scssc_area->operation_code = 0;
278 scssc_area->ks = PAGE_DEFAULT_KEY;
279 scssc_area->kc = PAGE_DEFAULT_KEY;
280 scssc_area->isc = QDIO_AIRQ_ISC;
281 scssc_area->schid = irq_ptr->schid;
282
283 /* enable the time delay disablement facility */
284 if (css_general_characteristics.aif_tdd)
285 scssc_area->word_with_d_bit = 0x10000000;
286
287 rc = chsc(scssc_area);
288 if (rc)
289 return -EIO;
290
291 rc = chsc_error_from_response(scssc_area->response.code);
292 if (rc) {
293 sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
294 QDIO_DBF_TEXT1(0, trace, dbf_text);
295 QDIO_DBF_TEXT1(0, setup, dbf_text);
296 ptr = &scssc_area->response;
297 QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
298 return rc;
299 }
300
301 QDIO_DBF_TEXT2(0, setup, "setscind");
302 QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
303 sizeof(unsigned long));
304 QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
305 sizeof(unsigned long));
306 return 0;
307}
308
309/* allocate non-shared indicators and shared indicator */
310int __init tiqdio_allocate_memory(void)
311{
312 q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
313 GFP_KERNEL);
314 if (!q_indicators)
315 return -ENOMEM;
316 return 0;
317}
318
319void tiqdio_free_memory(void)
320{
321 kfree(q_indicators);
322}
323
324int __init tiqdio_register_thinints(void)
325{
 326	char dbf_text[24];	/* "regthn" plus up to 16 hex digits plus '\0' */
327
328 isc_register(QDIO_AIRQ_ISC);
329 tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
330 NULL, QDIO_AIRQ_ISC);
331 if (IS_ERR(tiqdio_alsi)) {
332 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
333 QDIO_DBF_TEXT0(0, setup, dbf_text);
334 tiqdio_alsi = NULL;
335 isc_unregister(QDIO_AIRQ_ISC);
336 return -ENOMEM;
337 }
338 return 0;
339}
340
341int qdio_establish_thinint(struct qdio_irq *irq_ptr)
342{
343 if (!is_thinint_irq(irq_ptr))
344 return 0;
345
346 /* Check for aif time delay disablement. If installed,
347 * omit SVS even under LPAR
348 */
349 if (css_general_characteristics.aif_tdd)
350 css_qdio_omit_svs = 1;
351 return set_subchannel_ind(irq_ptr, 0);
352}
353
354void qdio_setup_thinint(struct qdio_irq *irq_ptr)
355{
356 if (!is_thinint_irq(irq_ptr))
357 return;
358 irq_ptr->dsci = get_indicator();
359 QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
360}
361
362void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
363{
364 if (!is_thinint_irq(irq_ptr))
365 return;
366
367 /* reset adapter interrupt indicators */
368 put_indicator(irq_ptr->dsci);
369 set_subchannel_ind(irq_ptr, 1);
370}
371
372void __exit tiqdio_unregister_thinints(void)
373{
374 tasklet_disable(&tiqdio_tasklet);
375
376 if (tiqdio_alsi) {
377 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
378 isc_unregister(QDIO_AIRQ_ISC);
379 }
380}
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
deleted file mode 100644
index 54328fec5ade..000000000000
--- a/drivers/s390/cio/schid.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef S390_SCHID_H
2#define S390_SCHID_H
3
4struct subchannel_id {
5 __u32 reserved:13;
6 __u32 ssid:2;
7 __u32 one:1;
8 __u32 sch_no:16;
9} __attribute__ ((packed,aligned(4)));
10
11
12/* Helper function for sane state of pre-allocated subchannel_id. */
13static inline void
14init_subchannel_id(struct subchannel_id *schid)
15{
16 memset(schid, 0, sizeof(struct subchannel_id));
17 schid->one = 1;
18}
19
20static inline int
21schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
22{
23 return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
24}
25
26#endif /* S390_SCHID_H */
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c
new file mode 100644
index 000000000000..f8da25ab576d
--- /dev/null
+++ b/drivers/s390/cio/scsw.c
@@ -0,0 +1,843 @@
1/*
2 * Helper functions for scsw access.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/module.h>
10#include <asm/cio.h>
11#include "css.h"
12#include "chsc.h"
13
14/**
15 * scsw_is_tm - check for transport mode scsw
16 * @scsw: pointer to scsw
17 *
18 * Return non-zero if the specified scsw is a transport mode scsw, zero
19 * otherwise.
20 */
21int scsw_is_tm(union scsw *scsw)
22{
23 return css_general_characteristics.fcx && (scsw->tm.x == 1);
24}
25EXPORT_SYMBOL(scsw_is_tm);
26
27/**
28 * scsw_key - return scsw key field
29 * @scsw: pointer to scsw
30 *
31 * Return the value of the key field of the specified scsw, regardless of
32 * whether it is a transport mode or command mode scsw.
33 */
34u32 scsw_key(union scsw *scsw)
35{
36 if (scsw_is_tm(scsw))
37 return scsw->tm.key;
38 else
39 return scsw->cmd.key;
40}
41EXPORT_SYMBOL(scsw_key);
42
43/**
44 * scsw_eswf - return scsw eswf field
45 * @scsw: pointer to scsw
46 *
47 * Return the value of the eswf field of the specified scsw, regardless of
48 * whether it is a transport mode or command mode scsw.
49 */
50u32 scsw_eswf(union scsw *scsw)
51{
52 if (scsw_is_tm(scsw))
53 return scsw->tm.eswf;
54 else
55 return scsw->cmd.eswf;
56}
57EXPORT_SYMBOL(scsw_eswf);
58
59/**
60 * scsw_cc - return scsw cc field
61 * @scsw: pointer to scsw
62 *
63 * Return the value of the cc field of the specified scsw, regardless of
64 * whether it is a transport mode or command mode scsw.
65 */
66u32 scsw_cc(union scsw *scsw)
67{
68 if (scsw_is_tm(scsw))
69 return scsw->tm.cc;
70 else
71 return scsw->cmd.cc;
72}
73EXPORT_SYMBOL(scsw_cc);
74
75/**
76 * scsw_ectl - return scsw ectl field
77 * @scsw: pointer to scsw
78 *
79 * Return the value of the ectl field of the specified scsw, regardless of
80 * whether it is a transport mode or command mode scsw.
81 */
82u32 scsw_ectl(union scsw *scsw)
83{
84 if (scsw_is_tm(scsw))
85 return scsw->tm.ectl;
86 else
87 return scsw->cmd.ectl;
88}
89EXPORT_SYMBOL(scsw_ectl);
90
91/**
92 * scsw_pno - return scsw pno field
93 * @scsw: pointer to scsw
94 *
95 * Return the value of the pno field of the specified scsw, regardless of
96 * whether it is a transport mode or command mode scsw.
97 */
98u32 scsw_pno(union scsw *scsw)
99{
100 if (scsw_is_tm(scsw))
101 return scsw->tm.pno;
102 else
103 return scsw->cmd.pno;
104}
105EXPORT_SYMBOL(scsw_pno);
106
107/**
108 * scsw_fctl - return scsw fctl field
109 * @scsw: pointer to scsw
110 *
111 * Return the value of the fctl field of the specified scsw, regardless of
112 * whether it is a transport mode or command mode scsw.
113 */
114u32 scsw_fctl(union scsw *scsw)
115{
116 if (scsw_is_tm(scsw))
117 return scsw->tm.fctl;
118 else
119 return scsw->cmd.fctl;
120}
121EXPORT_SYMBOL(scsw_fctl);
122
123/**
124 * scsw_actl - return scsw actl field
125 * @scsw: pointer to scsw
126 *
127 * Return the value of the actl field of the specified scsw, regardless of
128 * whether it is a transport mode or command mode scsw.
129 */
130u32 scsw_actl(union scsw *scsw)
131{
132 if (scsw_is_tm(scsw))
133 return scsw->tm.actl;
134 else
135 return scsw->cmd.actl;
136}
137EXPORT_SYMBOL(scsw_actl);
138
139/**
140 * scsw_stctl - return scsw stctl field
141 * @scsw: pointer to scsw
142 *
143 * Return the value of the stctl field of the specified scsw, regardless of
144 * whether it is a transport mode or command mode scsw.
145 */
146u32 scsw_stctl(union scsw *scsw)
147{
148 if (scsw_is_tm(scsw))
149 return scsw->tm.stctl;
150 else
151 return scsw->cmd.stctl;
152}
153EXPORT_SYMBOL(scsw_stctl);
154
155/**
156 * scsw_dstat - return scsw dstat field
157 * @scsw: pointer to scsw
158 *
159 * Return the value of the dstat field of the specified scsw, regardless of
160 * whether it is a transport mode or command mode scsw.
161 */
162u32 scsw_dstat(union scsw *scsw)
163{
164 if (scsw_is_tm(scsw))
165 return scsw->tm.dstat;
166 else
167 return scsw->cmd.dstat;
168}
169EXPORT_SYMBOL(scsw_dstat);
170
171/**
172 * scsw_cstat - return scsw cstat field
173 * @scsw: pointer to scsw
174 *
175 * Return the value of the cstat field of the specified scsw, regardless of
176 * whether it is a transport mode or command mode scsw.
177 */
178u32 scsw_cstat(union scsw *scsw)
179{
180 if (scsw_is_tm(scsw))
181 return scsw->tm.cstat;
182 else
183 return scsw->cmd.cstat;
184}
185EXPORT_SYMBOL(scsw_cstat);
186
187/**
188 * scsw_cmd_is_valid_key - check key field validity
189 * @scsw: pointer to scsw
190 *
191 * Return non-zero if the key field of the specified command mode scsw is
192 * valid, zero otherwise.
193 */
194int scsw_cmd_is_valid_key(union scsw *scsw)
195{
196 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
197}
198EXPORT_SYMBOL(scsw_cmd_is_valid_key);
199
200/**
 201 * scsw_cmd_is_valid_sctl - check sctl field validity
 202 * @scsw: pointer to scsw
 203 *
 204 * Return non-zero if the sctl field of the specified command mode scsw is
 205 * valid, zero otherwise.
206 */
207int scsw_cmd_is_valid_sctl(union scsw *scsw)
208{
209 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
210}
211EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
212
213/**
214 * scsw_cmd_is_valid_eswf - check eswf field validity
215 * @scsw: pointer to scsw
216 *
217 * Return non-zero if the eswf field of the specified command mode scsw is
218 * valid, zero otherwise.
219 */
220int scsw_cmd_is_valid_eswf(union scsw *scsw)
221{
222 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
223}
224EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
225
226/**
227 * scsw_cmd_is_valid_cc - check cc field validity
228 * @scsw: pointer to scsw
229 *
230 * Return non-zero if the cc field of the specified command mode scsw is
231 * valid, zero otherwise.
232 */
233int scsw_cmd_is_valid_cc(union scsw *scsw)
234{
235 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
236 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
237}
238EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
239
240/**
241 * scsw_cmd_is_valid_fmt - check fmt field validity
242 * @scsw: pointer to scsw
243 *
244 * Return non-zero if the fmt field of the specified command mode scsw is
245 * valid, zero otherwise.
246 */
247int scsw_cmd_is_valid_fmt(union scsw *scsw)
248{
249 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
250}
251EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
252
253/**
254 * scsw_cmd_is_valid_pfch - check pfch field validity
255 * @scsw: pointer to scsw
256 *
257 * Return non-zero if the pfch field of the specified command mode scsw is
258 * valid, zero otherwise.
259 */
260int scsw_cmd_is_valid_pfch(union scsw *scsw)
261{
262 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
263}
264EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
265
266/**
267 * scsw_cmd_is_valid_isic - check isic field validity
268 * @scsw: pointer to scsw
269 *
270 * Return non-zero if the isic field of the specified command mode scsw is
271 * valid, zero otherwise.
272 */
273int scsw_cmd_is_valid_isic(union scsw *scsw)
274{
275 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
276}
277EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
278
279/**
280 * scsw_cmd_is_valid_alcc - check alcc field validity
281 * @scsw: pointer to scsw
282 *
283 * Return non-zero if the alcc field of the specified command mode scsw is
284 * valid, zero otherwise.
285 */
286int scsw_cmd_is_valid_alcc(union scsw *scsw)
287{
288 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
289}
290EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
291
292/**
293 * scsw_cmd_is_valid_ssi - check ssi field validity
294 * @scsw: pointer to scsw
295 *
296 * Return non-zero if the ssi field of the specified command mode scsw is
297 * valid, zero otherwise.
298 */
299int scsw_cmd_is_valid_ssi(union scsw *scsw)
300{
301 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
302}
303EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
304
305/**
306 * scsw_cmd_is_valid_zcc - check zcc field validity
307 * @scsw: pointer to scsw
308 *
309 * Return non-zero if the zcc field of the specified command mode scsw is
310 * valid, zero otherwise.
311 */
312int scsw_cmd_is_valid_zcc(union scsw *scsw)
313{
314 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
315 (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
316}
317EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
318
319/**
320 * scsw_cmd_is_valid_ectl - check ectl field validity
321 * @scsw: pointer to scsw
322 *
323 * Return non-zero if the ectl field of the specified command mode scsw is
324 * valid, zero otherwise.
325 */
326int scsw_cmd_is_valid_ectl(union scsw *scsw)
327{
328 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
329 !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
330 (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
331}
332EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
333
334/**
335 * scsw_cmd_is_valid_pno - check pno field validity
336 * @scsw: pointer to scsw
337 *
338 * Return non-zero if the pno field of the specified command mode scsw is
339 * valid, zero otherwise.
340 */
341int scsw_cmd_is_valid_pno(union scsw *scsw)
342{
343 return (scsw->cmd.fctl != 0) &&
344 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
345 (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
346 ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
347 (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
348}
349EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
350
351/**
352 * scsw_cmd_is_valid_fctl - check fctl field validity
353 * @scsw: pointer to scsw
354 *
355 * Return non-zero if the fctl field of the specified command mode scsw is
356 * valid, zero otherwise.
357 */
358int scsw_cmd_is_valid_fctl(union scsw *scsw)
359{
 360	/* Only valid if pmcw.dnv == 1 */
361 return 1;
362}
363EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
364
365/**
366 * scsw_cmd_is_valid_actl - check actl field validity
367 * @scsw: pointer to scsw
368 *
369 * Return non-zero if the actl field of the specified command mode scsw is
370 * valid, zero otherwise.
371 */
372int scsw_cmd_is_valid_actl(union scsw *scsw)
373{
 374	/* Only valid if pmcw.dnv == 1 */
375 return 1;
376}
377EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
378
379/**
380 * scsw_cmd_is_valid_stctl - check stctl field validity
381 * @scsw: pointer to scsw
382 *
383 * Return non-zero if the stctl field of the specified command mode scsw is
384 * valid, zero otherwise.
385 */
386int scsw_cmd_is_valid_stctl(union scsw *scsw)
387{
 388	/* Only valid if pmcw.dnv == 1 */
389 return 1;
390}
391EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
392
393/**
394 * scsw_cmd_is_valid_dstat - check dstat field validity
395 * @scsw: pointer to scsw
396 *
397 * Return non-zero if the dstat field of the specified command mode scsw is
398 * valid, zero otherwise.
399 */
400int scsw_cmd_is_valid_dstat(union scsw *scsw)
401{
402 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
403 (scsw->cmd.cc != 3);
404}
405EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
406
407/**
408 * scsw_cmd_is_valid_cstat - check cstat field validity
409 * @scsw: pointer to scsw
410 *
411 * Return non-zero if the cstat field of the specified command mode scsw is
412 * valid, zero otherwise.
413 */
414int scsw_cmd_is_valid_cstat(union scsw *scsw)
415{
416 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
417 (scsw->cmd.cc != 3);
418}
419EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
420
421/**
422 * scsw_tm_is_valid_key - check key field validity
423 * @scsw: pointer to scsw
424 *
425 * Return non-zero if the key field of the specified transport mode scsw is
426 * valid, zero otherwise.
427 */
428int scsw_tm_is_valid_key(union scsw *scsw)
429{
430 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
431}
432EXPORT_SYMBOL(scsw_tm_is_valid_key);
433
434/**
435 * scsw_tm_is_valid_eswf - check eswf field validity
436 * @scsw: pointer to scsw
437 *
438 * Return non-zero if the eswf field of the specified transport mode scsw is
439 * valid, zero otherwise.
440 */
441int scsw_tm_is_valid_eswf(union scsw *scsw)
442{
443 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
444}
445EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
446
447/**
448 * scsw_tm_is_valid_cc - check cc field validity
449 * @scsw: pointer to scsw
450 *
451 * Return non-zero if the cc field of the specified transport mode scsw is
452 * valid, zero otherwise.
453 */
454int scsw_tm_is_valid_cc(union scsw *scsw)
455{
456 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
457 (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
458}
459EXPORT_SYMBOL(scsw_tm_is_valid_cc);
460
461/**
462 * scsw_tm_is_valid_fmt - check fmt field validity
463 * @scsw: pointer to scsw
464 *
465 * Return non-zero if the fmt field of the specified transport mode scsw is
466 * valid, zero otherwise.
467 */
468int scsw_tm_is_valid_fmt(union scsw *scsw)
469{
470 return 1;
471}
472EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
473
474/**
475 * scsw_tm_is_valid_x - check x field validity
476 * @scsw: pointer to scsw
477 *
478 * Return non-zero if the x field of the specified transport mode scsw is
479 * valid, zero otherwise.
480 */
481int scsw_tm_is_valid_x(union scsw *scsw)
482{
483 return 1;
484}
485EXPORT_SYMBOL(scsw_tm_is_valid_x);
486
487/**
488 * scsw_tm_is_valid_q - check q field validity
489 * @scsw: pointer to scsw
490 *
491 * Return non-zero if the q field of the specified transport mode scsw is
492 * valid, zero otherwise.
493 */
494int scsw_tm_is_valid_q(union scsw *scsw)
495{
496 return 1;
497}
498EXPORT_SYMBOL(scsw_tm_is_valid_q);
499
500/**
501 * scsw_tm_is_valid_ectl - check ectl field validity
502 * @scsw: pointer to scsw
503 *
504 * Return non-zero if the ectl field of the specified transport mode scsw is
505 * valid, zero otherwise.
506 */
507int scsw_tm_is_valid_ectl(union scsw *scsw)
508{
509 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
510 !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
511 (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
512}
513EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
514
515/**
516 * scsw_tm_is_valid_pno - check pno field validity
517 * @scsw: pointer to scsw
518 *
519 * Return non-zero if the pno field of the specified transport mode scsw is
520 * valid, zero otherwise.
521 */
522int scsw_tm_is_valid_pno(union scsw *scsw)
523{
524 return (scsw->tm.fctl != 0) &&
525 (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
526 (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
527 ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
528 (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
529}
530EXPORT_SYMBOL(scsw_tm_is_valid_pno);
531
532/**
533 * scsw_tm_is_valid_fctl - check fctl field validity
534 * @scsw: pointer to scsw
535 *
536 * Return non-zero if the fctl field of the specified transport mode scsw is
537 * valid, zero otherwise.
538 */
539int scsw_tm_is_valid_fctl(union scsw *scsw)
540{
 541	/* Only valid if pmcw.dnv == 1 */
542 return 1;
543}
544EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
545
546/**
547 * scsw_tm_is_valid_actl - check actl field validity
548 * @scsw: pointer to scsw
549 *
550 * Return non-zero if the actl field of the specified transport mode scsw is
551 * valid, zero otherwise.
552 */
553int scsw_tm_is_valid_actl(union scsw *scsw)
554{
 555	/* Only valid if pmcw.dnv == 1 */
556 return 1;
557}
558EXPORT_SYMBOL(scsw_tm_is_valid_actl);
559
560/**
561 * scsw_tm_is_valid_stctl - check stctl field validity
562 * @scsw: pointer to scsw
563 *
564 * Return non-zero if the stctl field of the specified transport mode scsw is
565 * valid, zero otherwise.
566 */
567int scsw_tm_is_valid_stctl(union scsw *scsw)
568{
 569	/* Only valid if pmcw.dnv == 1 */
570 return 1;
571}
572EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
573
574/**
575 * scsw_tm_is_valid_dstat - check dstat field validity
576 * @scsw: pointer to scsw
577 *
578 * Return non-zero if the dstat field of the specified transport mode scsw is
579 * valid, zero otherwise.
580 */
581int scsw_tm_is_valid_dstat(union scsw *scsw)
582{
583 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
584 (scsw->tm.cc != 3);
585}
586EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
587
588/**
589 * scsw_tm_is_valid_cstat - check cstat field validity
590 * @scsw: pointer to scsw
591 *
592 * Return non-zero if the cstat field of the specified transport mode scsw is
593 * valid, zero otherwise.
594 */
595int scsw_tm_is_valid_cstat(union scsw *scsw)
596{
597 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
598 (scsw->tm.cc != 3);
599}
600EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
601
602/**
603 * scsw_tm_is_valid_fcxs - check fcxs field validity
604 * @scsw: pointer to scsw
605 *
606 * Return non-zero if the fcxs field of the specified transport mode scsw is
607 * valid, zero otherwise.
608 */
609int scsw_tm_is_valid_fcxs(union scsw *scsw)
610{
611 return 1;
612}
613EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
614
615/**
616 * scsw_tm_is_valid_schxs - check schxs field validity
617 * @scsw: pointer to scsw
618 *
619 * Return non-zero if the schxs field of the specified transport mode scsw is
620 * valid, zero otherwise.
621 */
622int scsw_tm_is_valid_schxs(union scsw *scsw)
623{
624 return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
625 SCHN_STAT_INTF_CTRL_CHK |
626 SCHN_STAT_PROT_CHECK |
627 SCHN_STAT_CHN_DATA_CHK));
628}
629EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
630
631/**
632 * scsw_is_valid_actl - check actl field validity
633 * @scsw: pointer to scsw
634 *
635 * Return non-zero if the actl field of the specified scsw is valid,
636 * regardless of whether it is a transport mode or command mode scsw.
637 * Return zero if the field does not contain a valid value.
638 */
639int scsw_is_valid_actl(union scsw *scsw)
640{
641 if (scsw_is_tm(scsw))
642 return scsw_tm_is_valid_actl(scsw);
643 else
644 return scsw_cmd_is_valid_actl(scsw);
645}
646EXPORT_SYMBOL(scsw_is_valid_actl);
647
648/**
649 * scsw_is_valid_cc - check cc field validity
650 * @scsw: pointer to scsw
651 *
652 * Return non-zero if the cc field of the specified scsw is valid,
653 * regardless of whether it is a transport mode or command mode scsw.
654 * Return zero if the field does not contain a valid value.
655 */
656int scsw_is_valid_cc(union scsw *scsw)
657{
658 if (scsw_is_tm(scsw))
659 return scsw_tm_is_valid_cc(scsw);
660 else
661 return scsw_cmd_is_valid_cc(scsw);
662}
663EXPORT_SYMBOL(scsw_is_valid_cc);
664
665/**
666 * scsw_is_valid_cstat - check cstat field validity
667 * @scsw: pointer to scsw
668 *
669 * Return non-zero if the cstat field of the specified scsw is valid,
670 * regardless of whether it is a transport mode or command mode scsw.
671 * Return zero if the field does not contain a valid value.
672 */
673int scsw_is_valid_cstat(union scsw *scsw)
674{
675 if (scsw_is_tm(scsw))
676 return scsw_tm_is_valid_cstat(scsw);
677 else
678 return scsw_cmd_is_valid_cstat(scsw);
679}
680EXPORT_SYMBOL(scsw_is_valid_cstat);
681
682/**
683 * scsw_is_valid_dstat - check dstat field validity
684 * @scsw: pointer to scsw
685 *
686 * Return non-zero if the dstat field of the specified scsw is valid,
687 * regardless of whether it is a transport mode or command mode scsw.
688 * Return zero if the field does not contain a valid value.
689 */
690int scsw_is_valid_dstat(union scsw *scsw)
691{
692 if (scsw_is_tm(scsw))
693 return scsw_tm_is_valid_dstat(scsw);
694 else
695 return scsw_cmd_is_valid_dstat(scsw);
696}
697EXPORT_SYMBOL(scsw_is_valid_dstat);
698
699/**
700 * scsw_is_valid_ectl - check ectl field validity
701 * @scsw: pointer to scsw
702 *
703 * Return non-zero if the ectl field of the specified scsw is valid,
704 * regardless of whether it is a transport mode or command mode scsw.
705 * Return zero if the field does not contain a valid value.
706 */
707int scsw_is_valid_ectl(union scsw *scsw)
708{
709 if (scsw_is_tm(scsw))
710 return scsw_tm_is_valid_ectl(scsw);
711 else
712 return scsw_cmd_is_valid_ectl(scsw);
713}
714EXPORT_SYMBOL(scsw_is_valid_ectl);
715
716/**
717 * scsw_is_valid_eswf - check eswf field validity
718 * @scsw: pointer to scsw
719 *
720 * Return non-zero if the eswf field of the specified scsw is valid,
721 * regardless of whether it is a transport mode or command mode scsw.
722 * Return zero if the field does not contain a valid value.
723 */
724int scsw_is_valid_eswf(union scsw *scsw)
725{
726 if (scsw_is_tm(scsw))
727 return scsw_tm_is_valid_eswf(scsw);
728 else
729 return scsw_cmd_is_valid_eswf(scsw);
730}
731EXPORT_SYMBOL(scsw_is_valid_eswf);
732
733/**
734 * scsw_is_valid_fctl - check fctl field validity
735 * @scsw: pointer to scsw
736 *
737 * Return non-zero if the fctl field of the specified scsw is valid,
738 * regardless of whether it is a transport mode or command mode scsw.
739 * Return zero if the field does not contain a valid value.
740 */
741int scsw_is_valid_fctl(union scsw *scsw)
742{
743 if (scsw_is_tm(scsw))
744 return scsw_tm_is_valid_fctl(scsw);
745 else
746 return scsw_cmd_is_valid_fctl(scsw);
747}
748EXPORT_SYMBOL(scsw_is_valid_fctl);
749
750/**
751 * scsw_is_valid_key - check key field validity
752 * @scsw: pointer to scsw
753 *
754 * Return non-zero if the key field of the specified scsw is valid,
755 * regardless of whether it is a transport mode or command mode scsw.
756 * Return zero if the field does not contain a valid value.
757 */
758int scsw_is_valid_key(union scsw *scsw)
759{
760 if (scsw_is_tm(scsw))
761 return scsw_tm_is_valid_key(scsw);
762 else
763 return scsw_cmd_is_valid_key(scsw);
764}
765EXPORT_SYMBOL(scsw_is_valid_key);
766
767/**
768 * scsw_is_valid_pno - check pno field validity
769 * @scsw: pointer to scsw
770 *
771 * Return non-zero if the pno field of the specified scsw is valid,
772 * regardless of whether it is a transport mode or command mode scsw.
773 * Return zero if the field does not contain a valid value.
774 */
775int scsw_is_valid_pno(union scsw *scsw)
776{
777 if (scsw_is_tm(scsw))
778 return scsw_tm_is_valid_pno(scsw);
779 else
780 return scsw_cmd_is_valid_pno(scsw);
781}
782EXPORT_SYMBOL(scsw_is_valid_pno);
783
784/**
785 * scsw_is_valid_stctl - check stctl field validity
786 * @scsw: pointer to scsw
787 *
788 * Return non-zero if the stctl field of the specified scsw is valid,
789 * regardless of whether it is a transport mode or command mode scsw.
790 * Return zero if the field does not contain a valid value.
791 */
792int scsw_is_valid_stctl(union scsw *scsw)
793{
794 if (scsw_is_tm(scsw))
795 return scsw_tm_is_valid_stctl(scsw);
796 else
797 return scsw_cmd_is_valid_stctl(scsw);
798}
799EXPORT_SYMBOL(scsw_is_valid_stctl);
800
801/**
802 * scsw_cmd_is_solicited - check for solicited scsw
803 * @scsw: pointer to scsw
804 *
805 * Return non-zero if the command mode scsw indicates that the associated
806 * status condition is solicited, zero if it is unsolicited.
807 */
808int scsw_cmd_is_solicited(union scsw *scsw)
809{
810 return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
811 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
812}
813EXPORT_SYMBOL(scsw_cmd_is_solicited);
814
815/**
816 * scsw_tm_is_solicited - check for solicited scsw
817 * @scsw: pointer to scsw
818 *
819 * Return non-zero if the transport mode scsw indicates that the associated
820 * status condition is solicited, zero if it is unsolicited.
821 */
822int scsw_tm_is_solicited(union scsw *scsw)
823{
824 return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
825 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
826}
827EXPORT_SYMBOL(scsw_tm_is_solicited);
828
829/**
830 * scsw_is_solicited - check for solicited scsw
831 * @scsw: pointer to scsw
832 *
833 * Return non-zero if the transport or command mode scsw indicates that the
834 * associated status condition is solicited, zero if it is unsolicited.
835 */
836int scsw_is_solicited(union scsw *scsw)
837{
838 if (scsw_is_tm(scsw))
839 return scsw_tm_is_solicited(scsw);
840 else
841 return scsw_cmd_is_solicited(scsw);
842}
843EXPORT_SYMBOL(scsw_is_solicited);
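
A usage sketch of the mode-independent accessors from a driver interrupt
handler (the handler is hypothetical; struct irb and the DEV_STAT_* bits come
from asm/cio.h):

#include <asm/cio.h>
#include <asm/ccwdev.h>

static void example_int_handler(struct ccw_device *cdev,
				unsigned long intparm, struct irb *irb)
{
	/* check field validity before consuming it; the helpers above
	 * encode the architectural rules for both scsw formats */
	if (scsw_is_valid_dstat(&irb->scsw) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK))
		dev_warn(&cdev->dev, "unit check received\n");
}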
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a1ab3e3efd11..62b6b55230d0 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -34,13 +34,15 @@
 #include <linux/mutex.h>
 #include <asm/s390_rdev.h>
 #include <asm/reset.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
 
 #include "ap_bus.h"
 
 /* Some prototypes. */
 static void ap_scan_bus(struct work_struct *);
 static void ap_poll_all(unsigned long);
-static void ap_poll_timeout(unsigned long);
+static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
 static int ap_poll_thread_start(void);
 static void ap_poll_thread_stop(void);
 static void ap_request_timeout(unsigned long);
@@ -80,12 +82,15 @@ static DECLARE_WORK(ap_config_work, ap_scan_bus);
80/* 82/*
81 * Tasklet & timer for AP request polling. 83 * Tasklet & timer for AP request polling.
82 */ 84 */
83static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
84static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); 85static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
85static atomic_t ap_poll_requests = ATOMIC_INIT(0); 86static atomic_t ap_poll_requests = ATOMIC_INIT(0);
86static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); 87static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
87static struct task_struct *ap_poll_kthread = NULL; 88static struct task_struct *ap_poll_kthread = NULL;
88static DEFINE_MUTEX(ap_poll_thread_mutex); 89static DEFINE_MUTEX(ap_poll_thread_mutex);
90static struct hrtimer ap_poll_timer;
 91/* In LPAR, poll with 4 kHz frequency, i.e. every 250000 nanoseconds.
 92 * If running under z/VM, change to 1500000 nanoseconds to match z/VM polling. */
93static unsigned long long poll_timeout = 250000;
89 94
90/** 95/**
 91 * ap_instructions_available() - Test if AP instructions are available. 96 * ap_instructions_available() - Test if AP instructions are available.
@@ -636,11 +641,39 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus,
636 641
637static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); 642static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
638 643
644static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
645{
646 return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
647}
648
649static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
650 size_t count)
651{
652 unsigned long long time;
653 ktime_t hr_time;
654
655 /* 120 seconds = maximum poll interval */
656 if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000)
657 return -EINVAL;
658 poll_timeout = time;
659 hr_time = ktime_set(0, poll_timeout);
660
661 if (!hrtimer_is_queued(&ap_poll_timer) ||
662 !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) {
663 ap_poll_timer.expires = hr_time;
664 hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS);
665 }
666 return count;
667}
668
669static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
670
639static struct bus_attribute *const ap_bus_attrs[] = { 671static struct bus_attribute *const ap_bus_attrs[] = {
640 &bus_attr_ap_domain, 672 &bus_attr_ap_domain,
641 &bus_attr_config_time, 673 &bus_attr_config_time,
642 &bus_attr_poll_thread, 674 &bus_attr_poll_thread,
643 NULL 675 &bus_attr_poll_timeout,
676 NULL,
644}; 677};
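
The new poll_timeout attribute exposes the hrtimer interval in nanoseconds as /sys/bus/ap/poll_timeout and rejects values outside 1 ns..120 s. Stripped of the timer re-arming, the bus attribute pattern used above reduces to the following (a sketch with hypothetical names; BUS_ATTR and the show/store signatures are the regular driver core API of this kernel generation):

static unsigned long long sample_value = 42;

static ssize_t sample_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", sample_value);
}

static ssize_t sample_store(struct bus_type *bus, const char *buf,
			    size_t count)
{
	unsigned long long v;

	if (sscanf(buf, "%llu\n", &v) != 1)
		return -EINVAL;		/* reject non-numeric input */
	sample_value = v;
	return count;	/* tell sysfs the whole buffer was consumed */
}

static BUS_ATTR(sample, 0644, sample_show, sample_store);
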
645 678
646/** 679/**
@@ -895,9 +928,10 @@ ap_config_timeout(unsigned long ptr)
895 */ 928 */
896static inline void ap_schedule_poll_timer(void) 929static inline void ap_schedule_poll_timer(void)
897{ 930{
898 if (timer_pending(&ap_poll_timer)) 931 if (hrtimer_is_queued(&ap_poll_timer))
899 return; 932 return;
900 mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME); 933 hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout),
934 HRTIMER_MODE_ABS);
901} 935}
902 936
903/** 937/**
@@ -1115,13 +1149,14 @@ EXPORT_SYMBOL(ap_cancel_message);
1115 1149
1116/** 1150/**
1117 * ap_poll_timeout(): AP receive polling for finished AP requests. 1151 * ap_poll_timeout(): AP receive polling for finished AP requests.
1118 * @unused: Unused variable. 1152 * @unused: Unused pointer.
1119 * 1153 *
1120 * Schedules the AP tasklet. 1154 * Schedules the AP tasklet using a high resolution timer.
1121 */ 1155 */
1122static void ap_poll_timeout(unsigned long unused) 1156static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1123{ 1157{
1124 tasklet_schedule(&ap_tasklet); 1158 tasklet_schedule(&ap_tasklet);
1159 return HRTIMER_NORESTART;
1125} 1160}
1126 1161
1127/** 1162/**
@@ -1344,6 +1379,14 @@ int __init ap_module_init(void)
1344 ap_config_timer.expires = jiffies + ap_config_time * HZ; 1379 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1345 add_timer(&ap_config_timer); 1380 add_timer(&ap_config_timer);
1346 1381
 1382 /* Set up the high resolution poll timer.
 1383 * If we are running under z/VM, adjust polling to the z/VM polling rate.
1384 */
1385 if (MACHINE_IS_VM)
1386 poll_timeout = 1500000;
1387 hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1388 ap_poll_timer.function = ap_poll_timeout;
1389
1347 /* Start the low priority AP bus poll thread. */ 1390 /* Start the low priority AP bus poll thread. */
1348 if (ap_thread_flag) { 1391 if (ap_thread_flag) {
1349 rc = ap_poll_thread_start(); 1392 rc = ap_poll_thread_start();
@@ -1355,7 +1398,7 @@ int __init ap_module_init(void)
1355 1398
1356out_work: 1399out_work:
1357 del_timer_sync(&ap_config_timer); 1400 del_timer_sync(&ap_config_timer);
1358 del_timer_sync(&ap_poll_timer); 1401 hrtimer_cancel(&ap_poll_timer);
1359 destroy_workqueue(ap_work_queue); 1402 destroy_workqueue(ap_work_queue);
1360out_root: 1403out_root:
1361 s390_root_dev_unregister(ap_root_device); 1404 s390_root_dev_unregister(ap_root_device);
@@ -1386,7 +1429,7 @@ void ap_module_exit(void)
1386 ap_reset_domain(); 1429 ap_reset_domain();
1387 ap_poll_thread_stop(); 1430 ap_poll_thread_stop();
1388 del_timer_sync(&ap_config_timer); 1431 del_timer_sync(&ap_config_timer);
1389 del_timer_sync(&ap_poll_timer); 1432 hrtimer_cancel(&ap_poll_timer);
1390 destroy_workqueue(ap_work_queue); 1433 destroy_workqueue(ap_work_queue);
1391 tasklet_kill(&ap_tasklet); 1434 tasklet_kill(&ap_tasklet);
1392 s390_root_dev_unregister(ap_root_device); 1435 s390_root_dev_unregister(ap_root_device);
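
Taken together, the ap_bus.c hunks swap the jiffies-based timer_list for a one-shot high resolution timer; a jiffies timer ticks far too coarsely to resolve the 250 microsecond poll interval used in LPAR. The whole lifecycle, reduced to a skeleton (hypothetical names; the hrtimer calls are the standard kernel API, though note this sketch arms the timer relative to now, while the patch itself passes HRTIMER_MODE_ABS):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer sample_timer;
static unsigned long long sample_timeout_ns = 250000;

/* Called in interrupt context; the return value decides re-arming. */
static enum hrtimer_restart sample_timer_fn(struct hrtimer *unused)
{
	/* kick the real work here, e.g. tasklet_schedule(...) */
	return HRTIMER_NORESTART;	/* one-shot, re-armed on demand */
}

static void sample_timer_setup(void)
{
	hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sample_timer.function = sample_timer_fn;
}

static void sample_timer_arm(void)
{
	if (hrtimer_is_queued(&sample_timer))
		return;			/* already pending, don't re-arm */
	hrtimer_start(&sample_timer, ktime_set(0, sample_timeout_ns),
		      HRTIMER_MODE_REL);
}

static void sample_timer_teardown(void)
{
	hrtimer_cancel(&sample_timer);	/* waits for a running callback */
}
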
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index c1e1200c43fc..446378b308fc 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -92,6 +92,8 @@ struct ap_queue_status {
92#define AP_DEVICE_TYPE_PCIXCC 5 92#define AP_DEVICE_TYPE_PCIXCC 5
93#define AP_DEVICE_TYPE_CEX2A 6 93#define AP_DEVICE_TYPE_CEX2A 6
94#define AP_DEVICE_TYPE_CEX2C 7 94#define AP_DEVICE_TYPE_CEX2C 7
95#define AP_DEVICE_TYPE_CEX2A2 8
96#define AP_DEVICE_TYPE_CEX2C2 9
95 97
96/* 98/*
97 * AP reset flag states 99 * AP reset flag states
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 4d36e805a234..cb22b97944b8 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -34,6 +34,7 @@
34#include <linux/fs.h> 34#include <linux/fs.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/compat.h> 36#include <linux/compat.h>
37#include <linux/smp_lock.h>
37#include <asm/atomic.h> 38#include <asm/atomic.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39#include <linux/hw_random.h> 40#include <linux/hw_random.h>
@@ -300,7 +301,9 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
300 */ 301 */
301static int zcrypt_open(struct inode *inode, struct file *filp) 302static int zcrypt_open(struct inode *inode, struct file *filp)
302{ 303{
304 lock_kernel();
303 atomic_inc(&zcrypt_open_count); 305 atomic_inc(&zcrypt_open_count);
306 unlock_kernel();
304 return 0; 307 return 0;
305} 308}
306 309
@@ -1068,10 +1071,8 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
1068 1071
1069#define LBUFSIZE 1200UL 1072#define LBUFSIZE 1200UL
1070 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 1073 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
1071 if (!lbuf) { 1074 if (!lbuf)
1072 PRINTK("kmalloc failed!\n");
1073 return 0; 1075 return 0;
1074 }
1075 1076
1076 local_count = min(LBUFSIZE - 1, count); 1077 local_count = min(LBUFSIZE - 1, count);
1077 if (copy_from_user(lbuf, buffer, local_count) != 0) { 1078 if (copy_from_user(lbuf, buffer, local_count) != 0) {
@@ -1081,23 +1082,15 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
1081 lbuf[local_count] = '\0'; 1082 lbuf[local_count] = '\0';
1082 1083
1083 ptr = strstr(lbuf, "Online devices"); 1084 ptr = strstr(lbuf, "Online devices");
1084 if (!ptr) { 1085 if (!ptr)
1085 PRINTK("Unable to parse data (missing \"Online devices\")\n");
1086 goto out; 1086 goto out;
1087 }
1088 ptr = strstr(ptr, "\n"); 1087 ptr = strstr(ptr, "\n");
1089 if (!ptr) { 1088 if (!ptr)
1090 PRINTK("Unable to parse data (missing newline "
1091 "after \"Online devices\")\n");
1092 goto out; 1089 goto out;
1093 }
1094 ptr++; 1090 ptr++;
1095 1091
1096 if (strstr(ptr, "Waiting work element counts") == NULL) { 1092 if (strstr(ptr, "Waiting work element counts") == NULL)
1097 PRINTK("Unable to parse data (missing "
1098 "\"Waiting work element counts\")\n");
1099 goto out; 1093 goto out;
1100 }
1101 1094
1102 for (j = 0; j < 64 && *ptr; ptr++) { 1095 for (j = 0; j < 64 && *ptr; ptr++) {
1103 /* 1096 /*
@@ -1197,16 +1190,12 @@ int __init zcrypt_api_init(void)
1197 1190
1198 /* Register the request sprayer. */ 1191 /* Register the request sprayer. */
1199 rc = misc_register(&zcrypt_misc_device); 1192 rc = misc_register(&zcrypt_misc_device);
1200 if (rc < 0) { 1193 if (rc < 0)
1201 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
1202 zcrypt_misc_device.minor, rc);
1203 goto out; 1194 goto out;
1204 }
1205 1195
1206 /* Set up the proc file system */ 1196 /* Set up the proc file system */
1207 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); 1197 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
1208 if (!zcrypt_entry) { 1198 if (!zcrypt_entry) {
1209 PRINTK("Couldn't create z90crypt proc entry\n");
1210 rc = -ENOMEM; 1199 rc = -ENOMEM;
1211 goto out_misc; 1200 goto out_misc;
1212 } 1201 }
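
Besides dropping the PRINTK chatter, the one functional change in zcrypt_api.c is the explicit lock_kernel()/unlock_kernel() pair in zcrypt_open(): with the big kernel lock being pushed out of the generic chrdev open() path, drivers that still depend on its serialization now have to take it themselves. In isolation (a sketch; the open handler is hypothetical):

#include <linux/smp_lock.h>

static int sample_open(struct inode *inode, struct file *filp)
{
	lock_kernel();		/* formerly taken implicitly by chrdev code */
	/* ... setup that must not race with other BKL holders ... */
	unlock_kernel();
	return 0;
}
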
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 5c6e222b2ac4..1d1ec74dadb2 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -30,34 +30,6 @@
30#ifndef _ZCRYPT_API_H_ 30#ifndef _ZCRYPT_API_H_
31#define _ZCRYPT_API_H_ 31#define _ZCRYPT_API_H_
32 32
33/**
34 * Macro definitions
35 *
36 * PDEBUG debugs in the form "zcrypt: function_name -> message"
37 *
38 * PRINTK is like PDEBUG, except that it is always enabled
39 * PRINTKN is like PRINTK, except that it does not include the function name
40 * PRINTKW is like PRINTK, except that it uses KERN_WARNING
41 * PRINTKC is like PRINTK, except that it uses KERN_CRIT
42 */
43#define DEV_NAME "zcrypt"
44
45#define PRINTK(fmt, args...) \
46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
47#define PRINTKN(fmt, args...) \
48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
49#define PRINTKW(fmt, args...) \
50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
51#define PRINTKC(fmt, args...) \
52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)
53
54#ifdef ZCRYPT_DEBUG
55#define PDEBUG(fmt, args...) \
56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
57#else
58#define PDEBUG(fmt, args...) do {} while (0)
59#endif
60
61#include "ap_bus.h" 33#include "ap_bus.h"
62#include <asm/zcrypt.h> 34#include <asm/zcrypt.h>
63 35
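
The whole PRINTK/PRINTKN/PRINTKW/PRINTKC/PDEBUG family goes away here along with its last users in the files below. Where debug output is still wanted, the stock kernel facilities cover the same ground (a sketch, not taken from this patch):

#include <linux/kernel.h>

static void sample(void)
{
	/* compiled out unless DEBUG is defined for this file */
	pr_debug("zcrypt: %s -> request queued\n", __func__);
	/* always-on equivalent of the old PRINTKW */
	printk(KERN_WARNING "zcrypt: device went offline\n");
}
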
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 08657f604b8c..54f4cbc3be9e 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -49,6 +49,7 @@
49 49
50static struct ap_device_id zcrypt_cex2a_ids[] = { 50static struct ap_device_id zcrypt_cex2a_ids[] = {
51 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, 51 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
52 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
52 { /* end of list */ }, 53 { /* end of list */ },
53}; 54};
54 55
@@ -242,9 +243,6 @@ static int convert_response(struct zcrypt_device *zdev,
242 return convert_type80(zdev, reply, 243 return convert_type80(zdev, reply,
243 outputdata, outputdatalength); 244 outputdata, outputdatalength);
244 default: /* Unknown response type, this should NEVER EVER happen */ 245 default: /* Unknown response type, this should NEVER EVER happen */
245 PRINTK("Unrecognized Message Header: %08x%08x\n",
246 *(unsigned int *) reply->message,
247 *(unsigned int *) (reply->message+4));
248 zdev->online = 0; 246 zdev->online = 0;
249 return -EAGAIN; /* repeat the request on a different device. */ 247 return -EAGAIN; /* repeat the request on a different device. */
250 } 248 }
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 3e27fe77d207..03ba27f05f92 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -92,10 +92,6 @@ static inline int convert_error(struct zcrypt_device *zdev,
92{ 92{
93 struct error_hdr *ehdr = reply->message; 93 struct error_hdr *ehdr = reply->message;
94 94
95 PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n",
96 ehdr->type, *(unsigned int *) reply->message,
97 *(unsigned int *) (reply->message + 4));
98
99 switch (ehdr->reply_code) { 95 switch (ehdr->reply_code) {
100 case REP82_ERROR_OPERAND_INVALID: 96 case REP82_ERROR_OPERAND_INVALID:
101 case REP82_ERROR_OPERAND_SIZE: 97 case REP82_ERROR_OPERAND_SIZE:
@@ -123,8 +119,6 @@ static inline int convert_error(struct zcrypt_device *zdev,
123 zdev->online = 0; 119 zdev->online = 0;
124 return -EAGAIN; 120 return -EAGAIN;
125 default: 121 default:
126 PRINTKW("unknown type %02x reply code = %d\n",
127 ehdr->type, ehdr->reply_code);
128 zdev->online = 0; 122 zdev->online = 0;
129 return -EAGAIN; /* repeat the request on a different device. */ 123 return -EAGAIN; /* repeat the request on a different device. */
130 } 124 }
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 6e93b4751782..12da4815ba8e 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -226,9 +226,6 @@ static int convert_response(struct zcrypt_device *zdev,
226 return convert_type84(zdev, reply, 226 return convert_type84(zdev, reply,
227 outputdata, outputdatalength); 227 outputdata, outputdatalength);
228 default: /* Unknown response type, this should NEVER EVER happen */ 228 default: /* Unknown response type, this should NEVER EVER happen */
229 PRINTK("Unrecognized Message Header: %08x%08x\n",
230 *(unsigned int *) reply->message,
231 *(unsigned int *) (reply->message+4));
232 zdev->online = 0; 229 zdev->online = 0;
233 return -EAGAIN; /* repeat the request on a different device. */ 230 return -EAGAIN; /* repeat the request on a different device. */
234 } 231 }
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 17ea56ce1c11..779952cb19fc 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -361,26 +361,18 @@ static int convert_type86(struct zcrypt_device *zdev,
361 service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); 361 service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
362 if (unlikely(service_rc != 0)) { 362 if (unlikely(service_rc != 0)) {
363 service_rs = le16_to_cpu(msg->cprb.ccp_rscode); 363 service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
364 if (service_rc == 8 && service_rs == 66) { 364 if (service_rc == 8 && service_rs == 66)
365 PDEBUG("Bad block format on PCICC\n");
366 return -EINVAL; 365 return -EINVAL;
367 } 366 if (service_rc == 8 && service_rs == 65)
368 if (service_rc == 8 && service_rs == 65) {
369 PDEBUG("Probably an even modulus on PCICC\n");
370 return -EINVAL; 367 return -EINVAL;
371 }
372 if (service_rc == 8 && service_rs == 770) { 368 if (service_rc == 8 && service_rs == 770) {
373 PDEBUG("Invalid key length on PCICC\n");
374 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; 369 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
375 return -EAGAIN; 370 return -EAGAIN;
376 } 371 }
377 if (service_rc == 8 && service_rs == 783) { 372 if (service_rc == 8 && service_rs == 783) {
378 PDEBUG("Extended bitlengths not enabled on PCICC\n");
379 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; 373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
380 return -EAGAIN; 374 return -EAGAIN;
381 } 375 }
382 PRINTK("Unknown service rc/rs (PCICC): %d/%d\n",
383 service_rc, service_rs);
384 zdev->online = 0; 376 zdev->online = 0;
385 return -EAGAIN; /* repeat the request on a different device. */ 377 return -EAGAIN; /* repeat the request on a different device. */
386 } 378 }
@@ -434,9 +426,6 @@ static int convert_response(struct zcrypt_device *zdev,
434 outputdata, outputdatalength); 426 outputdata, outputdatalength);
435 /* no break, incorrect cprb version is an unknown response */ 427 /* no break, incorrect cprb version is an unknown response */
436 default: /* Unknown response type, this should NEVER EVER happen */ 428 default: /* Unknown response type, this should NEVER EVER happen */
437 PRINTK("Unrecognized Message Header: %08x%08x\n",
438 *(unsigned int *) reply->message,
439 *(unsigned int *) (reply->message+4));
440 zdev->online = 0; 429 zdev->online = 0;
441 return -EAGAIN; /* repeat the request on a different device. */ 430 return -EAGAIN; /* repeat the request on a different device. */
442 } 431 }
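
The rc/rs pairs tested above come back in the CPRB reply from the coprocessor: rc 8 with rs 65 or 66 means the caller handed in unusable input, while rs 770 and 783 downgrade the advertised modulus size and ask for a retry. The pure -EINVAL cases would lend themselves to a small lookup table (an illustrative sketch only; the rs 770/783 branches have side effects on zdev and must stay open-coded):

#include <linux/kernel.h>
#include <linux/types.h>

static const struct {
	u16 rc, rs;
} invalid_input[] = {
	{ 8, 66 },	/* bad block format */
	{ 8, 65 },	/* probably an even modulus */
};

static int is_invalid_input(u16 rc, u16 rs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(invalid_input); i++)
		if (invalid_input[i].rc == rc && invalid_input[i].rs == rs)
			return 1;
	return 0;
}
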
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 0bc9b3188e64..d8ad36f81540 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -72,6 +72,7 @@ struct response_type {
72static struct ap_device_id zcrypt_pcixcc_ids[] = { 72static struct ap_device_id zcrypt_pcixcc_ids[] = {
73 { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, 73 { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
74 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, 74 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
75 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
75 { /* end of list */ }, 76 { /* end of list */ },
76}; 77};
77 78
@@ -289,38 +290,19 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
289 ap_msg->length = sizeof(struct type6_hdr) + 290 ap_msg->length = sizeof(struct type6_hdr) +
290 CEIL4(xcRB->request_control_blk_length) + 291 CEIL4(xcRB->request_control_blk_length) +
291 xcRB->request_data_length; 292 xcRB->request_data_length;
292 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) { 293 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
293 PRINTK("Combined message is too large (%ld/%d/%d).\n",
294 sizeof(struct type6_hdr),
295 xcRB->request_control_blk_length,
296 xcRB->request_data_length);
297 return -EFAULT; 294 return -EFAULT;
298 } 295 if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
299 if (CEIL4(xcRB->reply_control_blk_length) >
300 PCIXCC_MAX_XCRB_REPLY_SIZE) {
301 PDEBUG("Reply CPRB length is too large (%d).\n",
302 xcRB->request_control_blk_length);
303 return -EFAULT; 296 return -EFAULT;
304 } 297 if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
305 if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) {
306 PDEBUG("Reply data block length is too large (%d).\n",
307 xcRB->reply_data_length);
308 return -EFAULT; 298 return -EFAULT;
309 }
310 replylen = CEIL4(xcRB->reply_control_blk_length) + 299 replylen = CEIL4(xcRB->reply_control_blk_length) +
311 CEIL4(xcRB->reply_data_length) + 300 CEIL4(xcRB->reply_data_length) +
312 sizeof(struct type86_fmt2_msg); 301 sizeof(struct type86_fmt2_msg);
313 if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { 302 if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
314 PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE"
315 " (%d/%d/%d).\n",
316 sizeof(struct type86_fmt2_msg),
317 xcRB->reply_control_blk_length,
318 xcRB->reply_data_length);
319 xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - 303 xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
320 (sizeof(struct type86_fmt2_msg) + 304 (sizeof(struct type86_fmt2_msg) +
321 CEIL4(xcRB->reply_data_length)); 305 CEIL4(xcRB->reply_data_length));
322 PDEBUG("Capping Reply CPRB length at %d\n",
323 xcRB->reply_control_blk_length);
324 } 306 }
325 307
326 /* prepare type6 header */ 308 /* prepare type6 header */
@@ -339,11 +321,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
339 xcRB->request_control_blk_length)) 321 xcRB->request_control_blk_length))
340 return -EFAULT; 322 return -EFAULT;
341 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > 323 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
342 xcRB->request_control_blk_length) { 324 xcRB->request_control_blk_length)
343 PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len,
344 xcRB->request_control_blk_length);
345 return -EFAULT; 325 return -EFAULT;
346 }
347 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; 326 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
348 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); 327 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
349 328
@@ -471,29 +450,18 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
471 service_rc = msg->cprbx.ccp_rtcode; 450 service_rc = msg->cprbx.ccp_rtcode;
472 if (unlikely(service_rc != 0)) { 451 if (unlikely(service_rc != 0)) {
473 service_rs = msg->cprbx.ccp_rscode; 452 service_rs = msg->cprbx.ccp_rscode;
474 if (service_rc == 8 && service_rs == 66) { 453 if (service_rc == 8 && service_rs == 66)
475 PDEBUG("Bad block format on PCIXCC/CEX2C\n");
476 return -EINVAL; 454 return -EINVAL;
477 } 455 if (service_rc == 8 && service_rs == 65)
478 if (service_rc == 8 && service_rs == 65) {
479 PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n");
480 return -EINVAL; 456 return -EINVAL;
481 } 457 if (service_rc == 8 && service_rs == 770)
482 if (service_rc == 8 && service_rs == 770) {
483 PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
484 return -EINVAL; 458 return -EINVAL;
485 }
486 if (service_rc == 8 && service_rs == 783) { 459 if (service_rc == 8 && service_rs == 783) {
487 PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
488 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 460 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
489 return -EAGAIN; 461 return -EAGAIN;
490 } 462 }
491 if (service_rc == 12 && service_rs == 769) { 463 if (service_rc == 12 && service_rs == 769)
492 PDEBUG("Invalid key on PCIXCC/CEX2C\n");
493 return -EINVAL; 464 return -EINVAL;
494 }
495 PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
496 service_rc, service_rs);
497 zdev->online = 0; 465 zdev->online = 0;
498 return -EAGAIN; /* repeat the request on a different device. */ 466 return -EAGAIN; /* repeat the request on a different device. */
499 } 467 }
@@ -569,11 +537,8 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
569 } __attribute__((packed)) *msg = reply->message; 537 } __attribute__((packed)) *msg = reply->message;
570 char *data = reply->message; 538 char *data = reply->message;
571 539
572 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) { 540 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
573 PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
574 rc, rs);
575 return -EINVAL; 541 return -EINVAL;
576 }
577 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); 542 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
578 return msg->fmt2.count2; 543 return msg->fmt2.count2;
579} 544}
@@ -598,9 +563,6 @@ static int convert_response_ica(struct zcrypt_device *zdev,
598 outputdata, outputdatalength); 563 outputdata, outputdatalength);
599 /* no break, incorrect cprb version is an unknown response */ 564 /* no break, incorrect cprb version is an unknown response */
600 default: /* Unknown response type, this should NEVER EVER happen */ 565 default: /* Unknown response type, this should NEVER EVER happen */
601 PRINTK("Unrecognized Message Header: %08x%08x\n",
602 *(unsigned int *) reply->message,
603 *(unsigned int *) (reply->message+4));
604 zdev->online = 0; 566 zdev->online = 0;
605 return -EAGAIN; /* repeat the request on a different device. */ 567 return -EAGAIN; /* repeat the request on a different device. */
606 } 568 }
@@ -627,9 +589,6 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
627 return convert_type86_xcrb(zdev, reply, xcRB); 589 return convert_type86_xcrb(zdev, reply, xcRB);
628 /* no break, incorrect cprb version is an unknown response */ 590 /* no break, incorrect cprb version is an unknown response */
629 default: /* Unknown response type, this should NEVER EVER happen */ 591 default: /* Unknown response type, this should NEVER EVER happen */
630 PRINTK("Unrecognized Message Header: %08x%08x\n",
631 *(unsigned int *) reply->message,
632 *(unsigned int *) (reply->message+4));
633 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 592 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
634 zdev->online = 0; 593 zdev->online = 0;
635 return -EAGAIN; /* repeat the request on a different device. */ 594 return -EAGAIN; /* repeat the request on a different device. */
@@ -653,9 +612,6 @@ static int convert_response_rng(struct zcrypt_device *zdev,
653 return convert_type86_rng(zdev, reply, data); 612 return convert_type86_rng(zdev, reply, data);
654 /* no break, incorrect cprb version is an unknown response */ 613 /* no break, incorrect cprb version is an unknown response */
655 default: /* Unknown response type, this should NEVER EVER happen */ 614 default: /* Unknown response type, this should NEVER EVER happen */
656 PRINTK("Unrecognized Message Header: %08x%08x\n",
657 *(unsigned int *) reply->message,
658 *(unsigned int *) (reply->message+4));
659 zdev->online = 0; 615 zdev->online = 0;
660 return -EAGAIN; /* repeat the request on a different device. */ 616 return -EAGAIN; /* repeat the request on a different device. */
661 } 617 }
@@ -700,10 +656,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
700 memcpy(msg->message, reply->message, length); 656 memcpy(msg->message, reply->message, length);
701 break; 657 break;
702 default: 658 default:
703 PRINTK("Invalid internal response type: %i\n", 659 memcpy(msg->message, &error_reply, sizeof error_reply);
704 resp_type->type);
705 memcpy(msg->message, &error_reply,
706 sizeof error_reply);
707 } 660 }
708 } else 661 } else
709 memcpy(msg->message, reply->message, sizeof error_reply); 662 memcpy(msg->message, reply->message, sizeof error_reply);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 04a1d7bf678c..c644669a75c2 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -703,7 +703,8 @@ claw_irq_handler(struct ccw_device *cdev,
703 if (!cdev->dev.driver_data) { 703 if (!cdev->dev.driver_data) {
704 printk(KERN_WARNING "claw: unsolicited interrupt for device:" 704 printk(KERN_WARNING "claw: unsolicited interrupt for device:"
705 "%s received c-%02x d-%02x\n", 705 "%s received c-%02x d-%02x\n",
706 cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat); 706 cdev->dev.bus_id, irb->scsw.cmd.cstat,
707 irb->scsw.cmd.dstat);
707#ifdef FUNCTRACE 708#ifdef FUNCTRACE
708 printk(KERN_INFO "claw: %s() " 709 printk(KERN_INFO "claw: %s() "
709 "exit on line %d\n",__func__,__LINE__); 710 "exit on line %d\n",__func__,__LINE__);
@@ -732,22 +733,23 @@ claw_irq_handler(struct ccw_device *cdev,
732#ifdef IOTRACE 733#ifdef IOTRACE
733 printk(KERN_INFO "%s: interrupt for device: %04x " 734 printk(KERN_INFO "%s: interrupt for device: %04x "
734 "received c-%02x d-%02x state-%02x\n", 735 "received c-%02x d-%02x state-%02x\n",
735 dev->name, p_ch->devno, irb->scsw.cstat, 736 dev->name, p_ch->devno, irb->scsw.cmd.cstat,
736 irb->scsw.dstat, p_ch->claw_state); 737 irb->scsw.cmd.dstat, p_ch->claw_state);
737#endif 738#endif
738 739
739 /* Copy interruption response block. */ 740 /* Copy interruption response block. */
740 memcpy(p_ch->irb, irb, sizeof(struct irb)); 741 memcpy(p_ch->irb, irb, sizeof(struct irb));
741 742
742 /* Check for good subchannel return code, otherwise error message */ 743 /* Check for good subchannel return code, otherwise error message */
743 if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) { 744 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
744 printk(KERN_INFO "%s: subchannel check for device: %04x -" 745 printk(KERN_INFO "%s: subchannel check for device: %04x -"
745 " Sch Stat %02x Dev Stat %02x CPA - %04x\n", 746 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
746 dev->name, p_ch->devno, 747 dev->name, p_ch->devno,
747 irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa); 748 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
749 irb->scsw.cmd.cpa);
748#ifdef IOTRACE 750#ifdef IOTRACE
749 dumpit((char *)irb,sizeof(struct irb)); 751 dumpit((char *)irb,sizeof(struct irb));
750 dumpit((char *)(unsigned long)irb->scsw.cpa, 752 dumpit((char *)(unsigned long)irb->scsw.cmd.cpa,
751 sizeof(struct ccw1)); 753 sizeof(struct ccw1));
752#endif 754#endif
753#ifdef FUNCTRACE 755#ifdef FUNCTRACE
@@ -759,22 +761,24 @@ claw_irq_handler(struct ccw_device *cdev,
759 } 761 }
760 762
761 /* Check the reason-code of a unit check */ 763 /* Check the reason-code of a unit check */
762 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 764 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
763 ccw_check_unit_check(p_ch, irb->ecw[0]); 765 ccw_check_unit_check(p_ch, irb->ecw[0]);
764 }
765 766
766 /* State machine to bring the connection up, down and to restart */ 767 /* State machine to bring the connection up, down and to restart */
767 p_ch->last_dstat = irb->scsw.dstat; 768 p_ch->last_dstat = irb->scsw.cmd.dstat;
768 769
769 switch (p_ch->claw_state) { 770 switch (p_ch->claw_state) {
770 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ 771 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
771#ifdef DEBUGMSG 772#ifdef DEBUGMSG
772 printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); 773 printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name);
773#endif 774#endif
774 if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 775 if (!((p_ch->irb->scsw.cmd.stctl &
775 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 776 SCSW_STCTL_SEC_STATUS) ||
776 (p_ch->irb->scsw.stctl == 777 (p_ch->irb->scsw.cmd.stctl ==
777 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 778 SCSW_STCTL_STATUS_PEND) ||
779 (p_ch->irb->scsw.cmd.stctl ==
780 (SCSW_STCTL_ALERT_STATUS |
781 SCSW_STCTL_STATUS_PEND)))) {
778#ifdef FUNCTRACE 782#ifdef FUNCTRACE
779 printk(KERN_INFO "%s:%s Exit on line %d\n", 783 printk(KERN_INFO "%s:%s Exit on line %d\n",
780 dev->name,__func__,__LINE__); 784 dev->name,__func__,__LINE__);
@@ -798,10 +802,13 @@ claw_irq_handler(struct ccw_device *cdev,
798 printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", 802 printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n",
799 dev->name); 803 dev->name);
800#endif 804#endif
801 if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 805 if (!((p_ch->irb->scsw.cmd.stctl &
802 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 806 SCSW_STCTL_SEC_STATUS) ||
803 (p_ch->irb->scsw.stctl == 807 (p_ch->irb->scsw.cmd.stctl ==
804 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 808 SCSW_STCTL_STATUS_PEND) ||
809 (p_ch->irb->scsw.cmd.stctl ==
810 (SCSW_STCTL_ALERT_STATUS |
811 SCSW_STCTL_STATUS_PEND)))) {
805#ifdef FUNCTRACE 812#ifdef FUNCTRACE
806 printk(KERN_INFO "%s:%s Exit on line %d\n", 813 printk(KERN_INFO "%s:%s Exit on line %d\n",
807 dev->name,__func__,__LINE__); 814 dev->name,__func__,__LINE__);
@@ -828,8 +835,8 @@ claw_irq_handler(struct ccw_device *cdev,
828 "interrupt for device:" 835 "interrupt for device:"
829 "%s received c-%02x d-%02x\n", 836 "%s received c-%02x d-%02x\n",
830 cdev->dev.bus_id, 837 cdev->dev.bus_id,
831 irb->scsw.cstat, 838 irb->scsw.cmd.cstat,
832 irb->scsw.dstat); 839 irb->scsw.cmd.dstat);
833 return; 840 return;
834 } 841 }
835#ifdef DEBUGMSG 842#ifdef DEBUGMSG
@@ -844,7 +851,7 @@ claw_irq_handler(struct ccw_device *cdev,
844 return; 851 return;
845 case CLAW_START_READ: 852 case CLAW_START_READ:
846 CLAW_DBF_TEXT(4,trace,"ReadIRQ"); 853 CLAW_DBF_TEXT(4,trace,"ReadIRQ");
847 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 854 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
848 clear_bit(0, (void *)&p_ch->IO_active); 855 clear_bit(0, (void *)&p_ch->IO_active);
849 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || 856 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
850 (p_ch->irb->ecw[0] & 0x40) == 0x40 || 857 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
@@ -863,8 +870,8 @@ claw_irq_handler(struct ccw_device *cdev,
863 CLAW_DBF_TEXT(4,trace,"notrdy"); 870 CLAW_DBF_TEXT(4,trace,"notrdy");
864 return; 871 return;
865 } 872 }
866 if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) && 873 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
867 (p_ch->irb->scsw.dstat==0)) { 874 (p_ch->irb->scsw.cmd.dstat == 0)) {
868 if (test_and_set_bit(CLAW_BH_ACTIVE, 875 if (test_and_set_bit(CLAW_BH_ACTIVE,
869 (void *)&p_ch->flag_a) == 0) { 876 (void *)&p_ch->flag_a) == 0) {
870 tasklet_schedule(&p_ch->tasklet); 877 tasklet_schedule(&p_ch->tasklet);
@@ -879,10 +886,13 @@ claw_irq_handler(struct ccw_device *cdev,
879 CLAW_DBF_TEXT(4,trace,"PCI_read"); 886 CLAW_DBF_TEXT(4,trace,"PCI_read");
880 return; 887 return;
881 } 888 }
882 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 889 if (!((p_ch->irb->scsw.cmd.stctl &
883 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 890 SCSW_STCTL_SEC_STATUS) ||
884 (p_ch->irb->scsw.stctl == 891 (p_ch->irb->scsw.cmd.stctl ==
885 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 892 SCSW_STCTL_STATUS_PEND) ||
893 (p_ch->irb->scsw.cmd.stctl ==
894 (SCSW_STCTL_ALERT_STATUS |
895 SCSW_STCTL_STATUS_PEND)))) {
886#ifdef FUNCTRACE 896#ifdef FUNCTRACE
887 printk(KERN_INFO "%s:%s Exit on line %d\n", 897 printk(KERN_INFO "%s:%s Exit on line %d\n",
888 dev->name,__func__,__LINE__); 898 dev->name,__func__,__LINE__);
@@ -911,7 +921,7 @@ claw_irq_handler(struct ccw_device *cdev,
911 CLAW_DBF_TEXT(4,trace,"RdIRQXit"); 921 CLAW_DBF_TEXT(4,trace,"RdIRQXit");
912 return; 922 return;
913 case CLAW_START_WRITE: 923 case CLAW_START_WRITE:
914 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 924 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 915 printk(KERN_INFO "%s: Unit Check Occurred in " 925 printk(KERN_INFO "%s: Unit Check Occurred in "
916 "write channel\n",dev->name); 926 "write channel\n",dev->name);
917 clear_bit(0, (void *)&p_ch->IO_active); 927 clear_bit(0, (void *)&p_ch->IO_active);
@@ -934,16 +944,19 @@ claw_irq_handler(struct ccw_device *cdev,
934 CLAW_DBF_TEXT(4,trace,"rstrtwrt"); 944 CLAW_DBF_TEXT(4,trace,"rstrtwrt");
935 return; 945 return;
936 } 946 }
937 if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { 947 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
938 clear_bit(0, (void *)&p_ch->IO_active); 948 clear_bit(0, (void *)&p_ch->IO_active);
939 printk(KERN_INFO "%s: Unit Exception " 949 printk(KERN_INFO "%s: Unit Exception "
 940 "Occurred in write channel\n", 950 "Occurred in write channel\n",
941 dev->name); 951 dev->name);
942 } 952 }
943 if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 953 if (!((p_ch->irb->scsw.cmd.stctl &
944 (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 954 SCSW_STCTL_SEC_STATUS) ||
945 (p_ch->irb->scsw.stctl == 955 (p_ch->irb->scsw.cmd.stctl ==
946 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { 956 SCSW_STCTL_STATUS_PEND) ||
957 (p_ch->irb->scsw.cmd.stctl ==
958 (SCSW_STCTL_ALERT_STATUS |
959 SCSW_STCTL_STATUS_PEND)))) {
947#ifdef FUNCTRACE 960#ifdef FUNCTRACE
948 printk(KERN_INFO "%s:%s Exit on line %d\n", 961 printk(KERN_INFO "%s:%s Exit on line %d\n",
949 dev->name,__func__,__LINE__); 962 dev->name,__func__,__LINE__);
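
The mechanical scsw.X to scsw.cmd.X edits in claw.c, and in the network drivers below, all follow from the SCSW rework earlier in this series: struct scsw became a union with a command mode view and a transport mode view, so code that only ever issues classic CCW I/O now names the cmd view explicitly. Conceptually (a simplified sketch; field selection and bit widths are abridged, the real layout lives in the s390 headers):

#include <linux/types.h>

union scsw_sketch {
	struct {			/* command mode: classic CCW I/O */
		u32 cc:2, fctl:3, actl:7, stctl:5;
		u32 cpa;		/* channel program address */
		u32 dstat:8, cstat:8, count:16;
	} cmd;
	struct {			/* transport mode: FCX/TCW I/O */
		u32 cc:2, fctl:3, stctl:5;
		u32 tcw;		/* transport command word address */
		u32 dstat:8, cstat:8, fcxs:8, schxs:8;
	} tm;
};

Mode-agnostic code goes through the scsw_*() helpers from scsw.c instead of picking a view by hand.
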
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 2a106f3a076d..7e6bd387f4d8 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -257,9 +257,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
257 if (duration > ch->prof.tx_time) 257 if (duration > ch->prof.tx_time)
258 ch->prof.tx_time = duration; 258 ch->prof.tx_time = duration;
259 259
260 if (ch->irb->scsw.count != 0) 260 if (ch->irb->scsw.cmd.count != 0)
261 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", 261 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
262 dev->name, ch->irb->scsw.count); 262 dev->name, ch->irb->scsw.cmd.count);
263 fsm_deltimer(&ch->timer); 263 fsm_deltimer(&ch->timer);
264 while ((skb = skb_dequeue(&ch->io_queue))) { 264 while ((skb = skb_dequeue(&ch->io_queue))) {
265 priv->stats.tx_packets++; 265 priv->stats.tx_packets++;
@@ -353,7 +353,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
353 struct channel *ch = arg; 353 struct channel *ch = arg;
354 struct net_device *dev = ch->netdev; 354 struct net_device *dev = ch->netdev;
355 struct ctcm_priv *priv = dev->priv; 355 struct ctcm_priv *priv = dev->priv;
356 int len = ch->max_bufsize - ch->irb->scsw.count; 356 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
357 struct sk_buff *skb = ch->trans_skb; 357 struct sk_buff *skb = ch->trans_skb;
358 __u16 block_len = *((__u16 *)skb->data); 358 __u16 block_len = *((__u16 *)skb->data);
359 int check_len; 359 int check_len;
@@ -1234,9 +1234,9 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1234 if (duration > ch->prof.tx_time) 1234 if (duration > ch->prof.tx_time)
1235 ch->prof.tx_time = duration; 1235 ch->prof.tx_time = duration;
1236 1236
1237 if (ch->irb->scsw.count != 0) 1237 if (ch->irb->scsw.cmd.count != 0)
1238 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", 1238 ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
1239 dev->name, ch->irb->scsw.count); 1239 dev->name, ch->irb->scsw.cmd.count);
1240 fsm_deltimer(&ch->timer); 1240 fsm_deltimer(&ch->timer);
1241 while ((skb = skb_dequeue(&ch->io_queue))) { 1241 while ((skb = skb_dequeue(&ch->io_queue))) {
1242 priv->stats.tx_packets++; 1242 priv->stats.tx_packets++;
@@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1394 struct sk_buff *skb = ch->trans_skb; 1394 struct sk_buff *skb = ch->trans_skb;
1395 struct sk_buff *new_skb; 1395 struct sk_buff *new_skb;
1396 unsigned long saveflags = 0; /* avoids compiler warning */ 1396 unsigned long saveflags = 0; /* avoids compiler warning */
1397 int len = ch->max_bufsize - ch->irb->scsw.count; 1397 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
1398 1398
1399 if (do_debug_data) { 1399 if (do_debug_data) {
1400 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", 1400 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n",
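
Both receive paths in ctcm size the incoming frame from the SCSW residual count: the channel subsystem reports how many bytes of the CCW count were left untransferred, so the bytes actually received are the buffer size minus that residual. As a standalone helper (sketch):

static int sample_rx_len(int max_bufsize, union scsw *scsw)
{
	/* cmd.count is the residual count: bytes NOT transferred */
	return max_bufsize - scsw->cmd.count;
}
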
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index d52843da4f55..6b13c1c1beb8 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1236,8 +1236,8 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1236 /* Check for unsolicited interrupts. */ 1236 /* Check for unsolicited interrupts. */
1237 if (cgdev == NULL) { 1237 if (cgdev == NULL) {
1238 ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", 1238 ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n",
1239 cdev->dev.bus_id, irb->scsw.cstat, 1239 cdev->dev.bus_id, irb->scsw.cmd.cstat,
1240 irb->scsw.dstat); 1240 irb->scsw.cmd.dstat);
1241 return; 1241 return;
1242 } 1242 }
1243 1243
@@ -1266,40 +1266,40 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1266 "received c-%02x d-%02x\n", 1266 "received c-%02x d-%02x\n",
1267 dev->name, 1267 dev->name,
1268 ch->id, 1268 ch->id,
1269 irb->scsw.cstat, 1269 irb->scsw.cmd.cstat,
1270 irb->scsw.dstat); 1270 irb->scsw.cmd.dstat);
1271 1271
1272 /* Copy interruption response block. */ 1272 /* Copy interruption response block. */
1273 memcpy(ch->irb, irb, sizeof(struct irb)); 1273 memcpy(ch->irb, irb, sizeof(struct irb));
1274 1274
1275 /* Check for good subchannel return code, otherwise error message */ 1275 /* Check for good subchannel return code, otherwise error message */
1276 if (irb->scsw.cstat) { 1276 if (irb->scsw.cmd.cstat) {
1277 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); 1277 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
1278 ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", 1278 ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n",
1279 dev->name, ch->id, irb->scsw.cstat, 1279 dev->name, ch->id, irb->scsw.cmd.cstat,
1280 irb->scsw.dstat); 1280 irb->scsw.cmd.dstat);
1281 return; 1281 return;
1282 } 1282 }
1283 1283
1284 /* Check the reason-code of a unit check */ 1284 /* Check the reason-code of a unit check */
1285 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { 1285 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
1286 ccw_unit_check(ch, irb->ecw[0]); 1286 ccw_unit_check(ch, irb->ecw[0]);
1287 return; 1287 return;
1288 } 1288 }
1289 if (irb->scsw.dstat & DEV_STAT_BUSY) { 1289 if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
1290 if (irb->scsw.dstat & DEV_STAT_ATTENTION) 1290 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
1291 fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); 1291 fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
1292 else 1292 else
1293 fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); 1293 fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
1294 return; 1294 return;
1295 } 1295 }
1296 if (irb->scsw.dstat & DEV_STAT_ATTENTION) { 1296 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
1297 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); 1297 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
1298 return; 1298 return;
1299 } 1299 }
1300 if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || 1300 if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
1301 (irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || 1301 (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
1302 (irb->scsw.stctl == 1302 (irb->scsw.cmd.stctl ==
1303 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) 1303 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1304 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); 1304 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
1305 else 1305 else
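
The final fsm_event() dispatch keys off the same three-way status control test that claw.c open-codes repeatedly above: secondary status, status pending alone, or alert status plus status pending all mark a final, solicited interrupt. A local predicate would capture the idiom once (a sketch, not part of the patch):

static inline int scsw_cmd_final_status(union scsw *scsw)
{
	u32 stctl = scsw->cmd.stctl;

	return (stctl & SCSW_STCTL_SEC_STATUS) ||
	       (stctl == SCSW_STCTL_STATUS_PEND) ||
	       (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND));
}
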
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 8e7697305a4c..f4a32375c037 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -36,7 +36,6 @@ const char *cu3088_type[] = {
36 "CTC/A", 36 "CTC/A",
37 "ESCON channel", 37 "ESCON channel",
38 "FICON channel", 38 "FICON channel",
39 "P390 LCS card",
40 "OSA LCS card", 39 "OSA LCS card",
41 "CLAW channel device", 40 "CLAW channel device",
42 "unknown channel type", 41 "unknown channel type",
@@ -49,7 +48,6 @@ static struct ccw_device_id cu3088_ids[] = {
49 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, 48 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
50 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, 49 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
51 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, 50 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
52 { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
53 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, 51 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
54 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, 52 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
55 { /* end of list */ } 53 { /* end of list */ }
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
index 1753661f702a..d8558a7105a5 100644
--- a/drivers/s390/net/cu3088.h
+++ b/drivers/s390/net/cu3088.h
@@ -17,9 +17,6 @@ enum channel_types {
17 /* Device is a FICON channel */ 17 /* Device is a FICON channel */
18 channel_type_ficon, 18 channel_type_ficon,
19 19
20 /* Device is a P390 LCS card */
21 channel_type_p390,
22
23 /* Device is a OSA2 card */ 20 /* Device is a OSA2 card */
24 channel_type_osa2, 21 channel_type_osa2,
25 22
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index dd22f4b37037..6de28385b354 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1327,8 +1327,8 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1327 char *sense; 1327 char *sense;
1328 1328
1329 sense = (char *) irb->ecw; 1329 sense = (char *) irb->ecw;
1330 cstat = irb->scsw.cstat; 1330 cstat = irb->scsw.cmd.cstat;
1331 dstat = irb->scsw.dstat; 1331 dstat = irb->scsw.cmd.dstat;
1332 1332
1333 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 1333 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1334 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 1334 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
@@ -1388,11 +1388,13 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1388 else 1388 else
1389 channel = &card->write; 1389 channel = &card->write;
1390 1390
1391 cstat = irb->scsw.cstat; 1391 cstat = irb->scsw.cmd.cstat;
1392 dstat = irb->scsw.dstat; 1392 dstat = irb->scsw.cmd.dstat;
1393 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); 1393 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id);
1394 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat); 1394 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
1395 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl); 1395 irb->scsw.cmd.dstat);
1396 LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
1397 irb->scsw.cmd.actl);
1396 1398
1397 /* Check for channel and device errors presented */ 1399 /* Check for channel and device errors presented */
1398 rc = lcs_get_problem(cdev, irb); 1400 rc = lcs_get_problem(cdev, irb);
@@ -1410,11 +1412,11 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1410 } 1412 }
1411 /* How far in the ccw chain have we processed? */ 1413 /* How far in the ccw chain have we processed? */
1412 if ((channel->state != LCS_CH_STATE_INIT) && 1414 if ((channel->state != LCS_CH_STATE_INIT) &&
1413 (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { 1415 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) {
1414 index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) 1416 index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
1415 - channel->ccws; 1417 - channel->ccws;
1416 if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || 1418 if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
1417 (irb->scsw.cstat & SCHN_STAT_PCI)) 1419 (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
1418 /* Bloody io subsystem tells us lies about cpa... */ 1420 /* Bloody io subsystem tells us lies about cpa... */
1419 index = (index - 1) & (LCS_NUM_BUFFS - 1); 1421 index = (index - 1) & (LCS_NUM_BUFFS - 1);
1420 while (channel->io_idx != index) { 1422 while (channel->io_idx != index) {
@@ -1425,25 +1427,24 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1425 } 1427 }
1426 } 1428 }
1427 1429
1428 if ((irb->scsw.dstat & DEV_STAT_DEV_END) || 1430 if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
1429 (irb->scsw.dstat & DEV_STAT_CHN_END) || 1431 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
1430 (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) 1432 (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
1431 /* Mark channel as stopped. */ 1433 /* Mark channel as stopped. */
1432 channel->state = LCS_CH_STATE_STOPPED; 1434 channel->state = LCS_CH_STATE_STOPPED;
1433 else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) 1435 else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
1434 /* CCW execution stopped on a suspend bit. */ 1436 /* CCW execution stopped on a suspend bit. */
1435 channel->state = LCS_CH_STATE_SUSPENDED; 1437 channel->state = LCS_CH_STATE_SUSPENDED;
1436 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { 1438 if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1437 if (irb->scsw.cc != 0) { 1439 if (irb->scsw.cmd.cc != 0) {
1438 ccw_device_halt(channel->ccwdev, (addr_t) channel); 1440 ccw_device_halt(channel->ccwdev, (addr_t) channel);
1439 return; 1441 return;
1440 } 1442 }
1441 /* The channel has been stopped by halt_IO. */ 1443 /* The channel has been stopped by halt_IO. */
1442 channel->state = LCS_CH_STATE_HALTED; 1444 channel->state = LCS_CH_STATE_HALTED;
1443 } 1445 }
1444 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1446 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
1445 channel->state = LCS_CH_STATE_CLEARED; 1447 channel->state = LCS_CH_STATE_CLEARED;
1446 }
1447 /* Do the rest in the tasklet. */ 1448 /* Do the rest in the tasklet. */
1448 tasklet_schedule(&channel->irq_tasklet); 1449 tasklet_schedule(&channel->irq_tasklet);
1449} 1450}
@@ -1761,7 +1762,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
1761 netif_carrier_off(card->dev); 1762 netif_carrier_off(card->dev);
1762 break; 1763 break;
1763 default: 1764 default:
1764 PRINT_INFO("UNRECOGNIZED LGW COMMAND\n"); 1765 LCS_DBF_TEXT(5, trace, "noLGWcmd");
1765 break; 1766 break;
1766 } 1767 }
1767 } else 1768 } else
@@ -2042,13 +2043,12 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
2042 LCS_DBF_TEXT(2, setup, "add_dev"); 2043 LCS_DBF_TEXT(2, setup, "add_dev");
2043 card = lcs_alloc_card(); 2044 card = lcs_alloc_card();
2044 if (!card) { 2045 if (!card) {
2045 PRINT_ERR("Allocation of lcs card failed\n"); 2046 LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM);
2046 put_device(&ccwgdev->dev); 2047 put_device(&ccwgdev->dev);
2047 return -ENOMEM; 2048 return -ENOMEM;
2048 } 2049 }
2049 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); 2050 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2050 if (ret) { 2051 if (ret) {
2051 PRINT_ERR("Creating attributes failed");
2052 lcs_free_card(card); 2052 lcs_free_card(card);
2053 put_device(&ccwgdev->dev); 2053 put_device(&ccwgdev->dev);
2054 return ret; 2054 return ret;
@@ -2140,7 +2140,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2140 default: 2140 default:
2141 LCS_DBF_TEXT(3, setup, "errinit"); 2141 LCS_DBF_TEXT(3, setup, "errinit");
2142 PRINT_ERR("LCS: Initialization failed\n"); 2142 PRINT_ERR("LCS: Initialization failed\n");
2143 PRINT_ERR("LCS: No device found!\n");
2144 goto out; 2143 goto out;
2145 } 2144 }
2146 if (!dev) 2145 if (!dev)
@@ -2269,7 +2268,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
2269 if (!card) 2268 if (!card)
2270 return; 2269 return;
2271 2270
2272 PRINT_INFO("Removing lcs group device ....\n");
2273 LCS_DBF_TEXT(3, setup, "remdev"); 2271 LCS_DBF_TEXT(3, setup, "remdev");
2274 LCS_DBF_HEX(3, setup, &card, sizeof(void*)); 2272 LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2275 if (ccwgdev->state == CCWGROUP_ONLINE) { 2273 if (ccwgdev->state == CCWGROUP_ONLINE) {
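
lcs_irq() deliberately keeps only channel-state bookkeeping in interrupt context and defers everything else to the per-channel tasklet ("Do the rest in the tasklet"). The underlying pattern, with hypothetical names:

#include <linux/interrupt.h>

static void sample_bh(unsigned long data)
{
	/* heavier per-buffer processing, outside hardirq context */
}

static DECLARE_TASKLET(sample_tasklet, sample_bh, 0);

static void sample_irq_tail(void)
{
	/* cheap and safe from the IRQ handler; runs sample_bh soon */
	tasklet_schedule(&sample_tasklet);
}
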
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index e4ba6a0372ac..9242b5acc66b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -625,9 +625,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
625 offset += header->next; 625 offset += header->next;
626 header->next -= NETIUCV_HDRLEN; 626 header->next -= NETIUCV_HDRLEN;
627 if (skb_tailroom(pskb) < header->next) { 627 if (skb_tailroom(pskb) < header->next) {
628 PRINT_WARN("%s: Illegal next field in iucv header: "
629 "%d > %d\n",
630 dev->name, header->next, skb_tailroom(pskb));
631 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", 628 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
632 header->next, skb_tailroom(pskb)); 629 header->next, skb_tailroom(pskb));
633 return; 630 return;
@@ -636,8 +633,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn,
636 skb_reset_mac_header(pskb); 633 skb_reset_mac_header(pskb);
637 skb = dev_alloc_skb(pskb->len); 634 skb = dev_alloc_skb(pskb->len);
638 if (!skb) { 635 if (!skb) {
639 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
640 dev->name);
641 IUCV_DBF_TEXT(data, 2, 636 IUCV_DBF_TEXT(data, 2,
642 "Out of memory in netiucv_unpack_skb\n"); 637 "Out of memory in netiucv_unpack_skb\n");
643 privptr->stats.rx_dropped++; 638 privptr->stats.rx_dropped++;
@@ -674,7 +669,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
674 669
675 if (!conn->netdev) { 670 if (!conn->netdev) {
676 iucv_message_reject(conn->path, msg); 671 iucv_message_reject(conn->path, msg);
677 PRINT_WARN("Received data for unlinked connection\n");
678 IUCV_DBF_TEXT(data, 2, 672 IUCV_DBF_TEXT(data, 2,
679 "Received data for unlinked connection\n"); 673 "Received data for unlinked connection\n");
680 return; 674 return;
@@ -682,8 +676,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
682 if (msg->length > conn->max_buffsize) { 676 if (msg->length > conn->max_buffsize) {
683 iucv_message_reject(conn->path, msg); 677 iucv_message_reject(conn->path, msg);
684 privptr->stats.rx_dropped++; 678 privptr->stats.rx_dropped++;
685 PRINT_WARN("msglen %d > max_buffsize %d\n",
686 msg->length, conn->max_buffsize);
687 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", 679 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
688 msg->length, conn->max_buffsize); 680 msg->length, conn->max_buffsize);
689 return; 681 return;
@@ -695,7 +687,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
695 msg->length, NULL); 687 msg->length, NULL);
696 if (rc || msg->length < 5) { 688 if (rc || msg->length < 5) {
697 privptr->stats.rx_errors++; 689 privptr->stats.rx_errors++;
698 PRINT_WARN("iucv_receive returned %08x\n", rc);
699 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); 690 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
700 return; 691 return;
701 } 692 }
@@ -778,7 +769,6 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
778 fsm_newstate(fi, CONN_STATE_IDLE); 769 fsm_newstate(fi, CONN_STATE_IDLE);
779 if (privptr) 770 if (privptr)
780 privptr->stats.tx_errors += txpackets; 771 privptr->stats.tx_errors += txpackets;
781 PRINT_WARN("iucv_send returned %08x\n", rc);
782 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); 772 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
783 } else { 773 } else {
784 if (privptr) { 774 if (privptr) {
@@ -806,8 +796,6 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
806 path->flags = 0; 796 path->flags = 0;
807 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); 797 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
808 if (rc) { 798 if (rc) {
809 PRINT_WARN("%s: IUCV accept failed with error %d\n",
810 netdev->name, rc);
811 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); 799 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
812 return; 800 return;
813 } 801 }
@@ -873,7 +861,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
873 IUCV_DBF_TEXT(trace, 3, __func__); 861 IUCV_DBF_TEXT(trace, 3, __func__);
874 862
875 fsm_newstate(fi, CONN_STATE_STARTWAIT); 863 fsm_newstate(fi, CONN_STATE_STARTWAIT);
876 PRINT_DEBUG("%s('%s'): connecting ...\n", 864 IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
877 conn->netdev->name, conn->userid); 865 conn->netdev->name, conn->userid);
878 866
879 /* 867 /*
@@ -968,8 +956,8 @@ static void conn_action_inval(fsm_instance *fi, int event, void *arg)
968 struct iucv_connection *conn = arg; 956 struct iucv_connection *conn = arg;
969 struct net_device *netdev = conn->netdev; 957 struct net_device *netdev = conn->netdev;
970 958
971 PRINT_WARN("%s: Cannot connect without username\n", netdev->name); 959 IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
972 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); 960 netdev->name, conn->userid);
973} 961}
974 962
975static const fsm_node conn_fsm[] = { 963static const fsm_node conn_fsm[] = {
@@ -1077,9 +1065,6 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
1077 "connection is up and running\n"); 1065 "connection is up and running\n");
1078 break; 1066 break;
1079 case DEV_STATE_STOPWAIT: 1067 case DEV_STATE_STOPWAIT:
1080 PRINT_INFO(
1081 "%s: got connection UP event during shutdown!\n",
1082 dev->name);
1083 IUCV_DBF_TEXT(data, 2, 1068 IUCV_DBF_TEXT(data, 2,
1084 "dev_action_connup: in DEV_STATE_STOPWAIT\n"); 1069 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1085 break; 1070 break;
@@ -1174,8 +1159,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
1174 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + 1159 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1175 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); 1160 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1176 if (!nskb) { 1161 if (!nskb) {
1177 PRINT_WARN("%s: Could not allocate tx_skb\n",
1178 conn->netdev->name);
1179 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); 1162 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1180 rc = -ENOMEM; 1163 rc = -ENOMEM;
1181 return rc; 1164 return rc;
@@ -1223,7 +1206,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
1223 skb_pull(skb, NETIUCV_HDRLEN); 1206 skb_pull(skb, NETIUCV_HDRLEN);
1224 skb_trim(skb, skb->len - NETIUCV_HDRLEN); 1207 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1225 } 1208 }
1226 PRINT_WARN("iucv_send returned %08x\n", rc);
1227 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); 1209 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1228 } else { 1210 } else {
1229 if (copied) 1211 if (copied)
@@ -1293,14 +1275,11 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1293 * Some sanity checks ... 1275 * Some sanity checks ...
1294 */ 1276 */
1295 if (skb == NULL) { 1277 if (skb == NULL) {
1296 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1297 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); 1278 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1298 privptr->stats.tx_dropped++; 1279 privptr->stats.tx_dropped++;
1299 return 0; 1280 return 0;
1300 } 1281 }
1301 if (skb_headroom(skb) < NETIUCV_HDRLEN) { 1282 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1302 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1303 dev->name, NETIUCV_HDRLEN);
1304 IUCV_DBF_TEXT(data, 2, 1283 IUCV_DBF_TEXT(data, 2,
1305 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); 1284 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1306 dev_kfree_skb(skb); 1285 dev_kfree_skb(skb);
@@ -1393,7 +1372,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1393 1372
1394 IUCV_DBF_TEXT(trace, 3, __func__); 1373 IUCV_DBF_TEXT(trace, 3, __func__);
1395 if (count > 9) { 1374 if (count > 9) {
1396 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1397 IUCV_DBF_TEXT_(setup, 2, 1375 IUCV_DBF_TEXT_(setup, 2,
1398 "%d is length of username\n", (int) count); 1376 "%d is length of username\n", (int) count);
1399 return -EINVAL; 1377 return -EINVAL;
@@ -1409,7 +1387,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1409 /* trailing lf, grr */ 1387 /* trailing lf, grr */
1410 break; 1388 break;
1411 } 1389 }
1412 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1413 IUCV_DBF_TEXT_(setup, 2, 1390 IUCV_DBF_TEXT_(setup, 2,
1414 "username: invalid character %c\n", *p); 1391 "username: invalid character %c\n", *p);
1415 return -EINVAL; 1392 return -EINVAL;
@@ -1421,18 +1398,15 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1421 if (memcmp(username, priv->conn->userid, 9) && 1398 if (memcmp(username, priv->conn->userid, 9) &&
1422 (ndev->flags & (IFF_UP | IFF_RUNNING))) { 1399 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1423 /* username changed while the interface is active. */ 1400 /* username changed while the interface is active. */
1424 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1425 dev->bus_id, priv->conn->userid);
1426 PRINT_WARN("netiucv: user cannot be updated\n");
1427 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); 1401 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1428 return -EBUSY; 1402 return -EPERM;
1429 } 1403 }
1430 read_lock_bh(&iucv_connection_rwlock); 1404 read_lock_bh(&iucv_connection_rwlock);
1431 list_for_each_entry(cp, &iucv_connection_list, list) { 1405 list_for_each_entry(cp, &iucv_connection_list, list) {
1432 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { 1406 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1433 read_unlock_bh(&iucv_connection_rwlock); 1407 read_unlock_bh(&iucv_connection_rwlock);
1434 PRINT_WARN("netiucv: Connection to %s already " 1408 IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
1435 "exists\n", username); 1409 "to %s already exists\n", username);
1436 return -EEXIST; 1410 return -EEXIST;
1437 } 1411 }
1438 } 1412 }
@@ -1466,13 +1440,10 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1466 bs1 = simple_strtoul(buf, &e, 0); 1440 bs1 = simple_strtoul(buf, &e, 0);
1467 1441
1468 if (e && (!isspace(*e))) { 1442 if (e && (!isspace(*e))) {
1469 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1470 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); 1443 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1471 return -EINVAL; 1444 return -EINVAL;
1472 } 1445 }
1473 if (bs1 > NETIUCV_BUFSIZE_MAX) { 1446 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1474 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1475 bs1);
1476 IUCV_DBF_TEXT_(setup, 2, 1447 IUCV_DBF_TEXT_(setup, 2,
1477 "buffer_write: buffer size %d too large\n", 1448 "buffer_write: buffer size %d too large\n",
1478 bs1); 1449 bs1);
@@ -1480,16 +1451,12 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1480 } 1451 }
1481 if ((ndev->flags & IFF_RUNNING) && 1452 if ((ndev->flags & IFF_RUNNING) &&
1482 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { 1453 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1483 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1484 bs1);
1485 IUCV_DBF_TEXT_(setup, 2, 1454 IUCV_DBF_TEXT_(setup, 2,
1486 "buffer_write: buffer size %d too small\n", 1455 "buffer_write: buffer size %d too small\n",
1487 bs1); 1456 bs1);
1488 return -EINVAL; 1457 return -EINVAL;
1489 } 1458 }
1490 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { 1459 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1491 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1492 bs1);
1493 IUCV_DBF_TEXT_(setup, 2, 1460 IUCV_DBF_TEXT_(setup, 2,
1494 "buffer_write: buffer size %d too small\n", 1461 "buffer_write: buffer size %d too small\n",
1495 bs1); 1462 bs1);
@@ -1963,7 +1930,6 @@ static ssize_t conn_write(struct device_driver *drv,
1963 1930
1964 IUCV_DBF_TEXT(trace, 3, __func__); 1931 IUCV_DBF_TEXT(trace, 3, __func__);
1965 if (count>9) { 1932 if (count>9) {
1966 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1967 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); 1933 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1968 return -EINVAL; 1934 return -EINVAL;
1969 } 1935 }
@@ -1976,7 +1942,6 @@ static ssize_t conn_write(struct device_driver *drv,
1976 if (*p == '\n') 1942 if (*p == '\n')
1977 /* trailing lf, grr */ 1943 /* trailing lf, grr */
1978 break; 1944 break;
1979 PRINT_WARN("netiucv: Invalid character in username!\n");
1980 IUCV_DBF_TEXT_(setup, 2, 1945 IUCV_DBF_TEXT_(setup, 2,
1981 "conn_write: invalid character %c\n", *p); 1946 "conn_write: invalid character %c\n", *p);
1982 return -EINVAL; 1947 return -EINVAL;
@@ -1989,8 +1954,8 @@ static ssize_t conn_write(struct device_driver *drv,
1989 list_for_each_entry(cp, &iucv_connection_list, list) { 1954 list_for_each_entry(cp, &iucv_connection_list, list) {
1990 if (!strncmp(username, cp->userid, 9)) { 1955 if (!strncmp(username, cp->userid, 9)) {
1991 read_unlock_bh(&iucv_connection_rwlock); 1956 read_unlock_bh(&iucv_connection_rwlock);
1992 PRINT_WARN("netiucv: Connection to %s already " 1957 IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
1993 "exists\n", username); 1958 "to %s already exists\n", username);
1994 return -EEXIST; 1959 return -EEXIST;
1995 } 1960 }
1996 } 1961 }
@@ -1998,9 +1963,6 @@ static ssize_t conn_write(struct device_driver *drv,
1998 1963
1999 dev = netiucv_init_netdevice(username); 1964 dev = netiucv_init_netdevice(username);
2000 if (!dev) { 1965 if (!dev) {
2001 PRINT_WARN("netiucv: Could not allocate network device "
2002 "structure for user '%s'\n",
2003 netiucv_printname(username));
2004 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); 1966 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2005 return -ENODEV; 1967 return -ENODEV;
2006 } 1968 }
@@ -2020,15 +1982,12 @@ static ssize_t conn_write(struct device_driver *drv,
2020 if (rc) 1982 if (rc)
2021 goto out_unreg; 1983 goto out_unreg;
2022 1984
2023 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2024 1985
2025 return count; 1986 return count;
2026 1987
2027out_unreg: 1988out_unreg:
2028 netiucv_unregister_device(priv->dev); 1989 netiucv_unregister_device(priv->dev);
2029out_free_ndev: 1990out_free_ndev:
2030 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2031 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2032 netiucv_free_netdevice(dev); 1991 netiucv_free_netdevice(dev);
2033 return rc; 1992 return rc;
2034} 1993}
@@ -2073,14 +2032,13 @@ static ssize_t remove_write (struct device_driver *drv,
2073 PRINT_WARN("netiucv: %s cannot be removed\n", 2032 PRINT_WARN("netiucv: %s cannot be removed\n",
2074 ndev->name); 2033 ndev->name);
2075 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); 2034 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2076 return -EBUSY; 2035 return -EPERM;
2077 } 2036 }
2078 unregister_netdev(ndev); 2037 unregister_netdev(ndev);
2079 netiucv_unregister_device(dev); 2038 netiucv_unregister_device(dev);
2080 return count; 2039 return count;
2081 } 2040 }
2082 read_unlock_bh(&iucv_connection_rwlock); 2041 read_unlock_bh(&iucv_connection_rwlock);
2083 PRINT_WARN("netiucv: net device %s unknown\n", name);
2084 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); 2042 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2085 return -EINVAL; 2043 return -EINVAL;
2086} 2044}
@@ -2148,7 +2106,6 @@ static int __init netiucv_init(void)
2148 netiucv_driver.groups = netiucv_drv_attr_groups; 2106 netiucv_driver.groups = netiucv_drv_attr_groups;
2149 rc = driver_register(&netiucv_driver); 2107 rc = driver_register(&netiucv_driver);
2150 if (rc) { 2108 if (rc) {
2151 PRINT_ERR("NETIUCV: failed to register driver.\n");
2152 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); 2109 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2153 goto out_iucv; 2110 goto out_iucv;
2154 } 2111 }
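
The netiucv hunks above route messages into the driver's in-memory debug trace instead of the console. A standalone sketch of that logging pattern follows; the buffer length and the sink() function are assumptions standing in for the s390 debug feature (the driver's IUCV_DBF_TEXT_ macro does roughly this: format into a per-cpu scratch buffer, then call debug_text_event()).

/* Model of an IUCV_DBF_TEXT_-style macro: format into a scratch
 * buffer, then hand the text to a trace sink.  sink() is a stand-in
 * for the s390 debug feature; DBF_TEXT_LEN is an assumed size. */
#include <stdio.h>

#define DBF_TEXT_LEN 64

static void sink(int level, const char *text)
{
        /* the real debug feature stores this in a wraparound
         * in-memory trace; printing to stderr is only for the demo */
        fprintf(stderr, "dbf[%d]: %s\n", level, text);
}

#define DBF_TEXT_(level, fmt, ...)                              \
do {                                                            \
        char _buf[DBF_TEXT_LEN];                                \
        snprintf(_buf, sizeof(_buf), fmt, ##__VA_ARGS__);       \
        sink(level, _buf);                                      \
} while (0)

int main(void)
{
        int rc = -22;

        DBF_TEXT_(2, "rc %d from iucv_send", rc);
        return 0;
}
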
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 699ac11debd8..1895dbb553cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
239/* not used unless the microcode gets patched */ 239/* not used unless the microcode gets patched */
240#define QETH_PCI_TIMER_VALUE(card) 3 240#define QETH_PCI_TIMER_VALUE(card) 3
241 241
242#define QETH_MIN_INPUT_THRESHOLD 1
243#define QETH_MAX_INPUT_THRESHOLD 500
244#define QETH_MIN_OUTPUT_THRESHOLD 1
245#define QETH_MAX_OUTPUT_THRESHOLD 300
246
247/* priority queueing */ 242/* priority queueing */
248#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING 243#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
249#define QETH_DEFAULT_QUEUE 2 244#define QETH_DEFAULT_QUEUE 2
@@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
811struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, 806struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
812 enum qeth_ipa_cmds, enum qeth_prot_versions); 807 enum qeth_ipa_cmds, enum qeth_prot_versions);
813int qeth_query_setadapterparms(struct qeth_card *); 808int qeth_query_setadapterparms(struct qeth_card *);
814int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, 809int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
815 unsigned int, const char *);
816void qeth_queue_input_buffer(struct qeth_card *, int); 810void qeth_queue_input_buffer(struct qeth_card *, int);
817struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, 811struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
818 struct qdio_buffer *, struct qdio_buffer_element **, int *, 812 struct qdio_buffer *, struct qdio_buffer_element **, int *,
819 struct qeth_hdr **); 813 struct qeth_hdr **);
820void qeth_schedule_recovery(struct qeth_card *); 814void qeth_schedule_recovery(struct qeth_card *);
821void qeth_qdio_output_handler(struct ccw_device *, unsigned int, 815void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
822 unsigned int, unsigned int, 816 int, int, int, unsigned long);
823 unsigned int, int, int,
824 unsigned long);
825void qeth_clear_ipacmd_list(struct qeth_card *); 817void qeth_clear_ipacmd_list(struct qeth_card *);
826int qeth_qdio_clear_card(struct qeth_card *, int); 818int qeth_qdio_clear_card(struct qeth_card *, int);
827void qeth_clear_working_pool_list(struct qeth_card *); 819void qeth_clear_working_pool_list(struct qeth_card *);
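
The prototype changes above track the rewritten qdio layer: the old status/qdio_error/siga_error triple collapses into a single qdio_error word that handlers test with bitmasks. A minimal sketch of that handler style; the two flag values below are illustrative assumptions, not the kernel's actual definitions.

/* Sketch of testing a consolidated qdio error word.  Only the
 * bitmask pattern matches the driver code above; the values are
 * made up for the demo. */
#include <stdio.h>

#define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40  /* assumed value */
#define QDIO_ERROR_SIGA_BUSY                0x02  /* assumed value */

static void output_handler(unsigned int qdio_error)
{
        if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
                puts("fatal: stop the queue and schedule recovery");
                return;
        }
        if (qdio_error & QDIO_ERROR_SIGA_BUSY)
                puts("SIGA busy: handle the retry path");
}

int main(void)
{
        output_handler(QDIO_ERROR_ACTIVATE_CHECK_CONDITION);
        output_handler(QDIO_ERROR_SIGA_BUSY);
        return 0;
}
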
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 9a71dae223e8..c3ad89e302bd 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -420,7 +420,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
420 QETH_DBF_TEXT(TRACE, 3, "urla"); 420 QETH_DBF_TEXT(TRACE, 3, "urla");
421 break; 421 break;
422 default: 422 default:
423 PRINT_WARN("Received data is IPA " 423 QETH_DBF_MESSAGE(2, "Received data is IPA "
424 "but not a reply!\n"); 424 "but not a reply!\n");
425 break; 425 break;
426 } 426 }
@@ -735,8 +735,8 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
735 char *sense; 735 char *sense;
736 736
737 sense = (char *) irb->ecw; 737 sense = (char *) irb->ecw;
738 cstat = irb->scsw.cstat; 738 cstat = irb->scsw.cmd.cstat;
739 dstat = irb->scsw.dstat; 739 dstat = irb->scsw.cmd.dstat;
740 740
741 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 741 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
742 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 742 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
@@ -823,8 +823,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
823 823
824 if (__qeth_check_irb_error(cdev, intparm, irb)) 824 if (__qeth_check_irb_error(cdev, intparm, irb))
825 return; 825 return;
826 cstat = irb->scsw.cstat; 826 cstat = irb->scsw.cmd.cstat;
827 dstat = irb->scsw.dstat; 827 dstat = irb->scsw.cmd.dstat;
828 828
829 card = CARD_FROM_CDEV(cdev); 829 card = CARD_FROM_CDEV(cdev);
830 if (!card) 830 if (!card)
@@ -842,10 +842,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
842 } 842 }
843 atomic_set(&channel->irq_pending, 0); 843 atomic_set(&channel->irq_pending, 0);
844 844
845 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC)) 845 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
846 channel->state = CH_STATE_STOPPED; 846 channel->state = CH_STATE_STOPPED;
847 847
848 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC)) 848 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
849 channel->state = CH_STATE_HALTED; 849 channel->state = CH_STATE_HALTED;
850 850
851 /*let's wake up immediately on data channel*/ 851 /*let's wake up immediately on data channel*/
@@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2073static int qeth_qdio_activate(struct qeth_card *card) 2073static int qeth_qdio_activate(struct qeth_card *card)
2074{ 2074{
2075 QETH_DBF_TEXT(SETUP, 3, "qdioact"); 2075 QETH_DBF_TEXT(SETUP, 3, "qdioact");
2076 return qdio_activate(CARD_DDEV(card), 0); 2076 return qdio_activate(CARD_DDEV(card));
2077} 2077}
2078 2078
2079static int qeth_dm_act(struct qeth_card *card) 2079static int qeth_dm_act(struct qeth_card *card)
@@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card)
2349 card->qdio.in_q->next_buf_to_init = 2349 card->qdio.in_q->next_buf_to_init =
2350 card->qdio.in_buf_pool.buf_count - 1; 2350 card->qdio.in_buf_pool.buf_count - 1;
2351 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, 2351 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2352 card->qdio.in_buf_pool.buf_count - 1, NULL); 2352 card->qdio.in_buf_pool.buf_count - 1);
2353 if (rc) { 2353 if (rc) {
2354 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2354 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2355 return rc; 2355 return rc;
2356 } 2356 }
2357 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
2358 if (rc) {
2359 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2360 return rc;
2361 }
2362 /* outbound queue */ 2357 /* outbound queue */
2363 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2358 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2364 memset(card->qdio.out_qs[i]->qdio_bufs, 0, 2359 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
@@ -2559,9 +2554,9 @@ int qeth_query_setadapterparms(struct qeth_card *card)
2559EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); 2554EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2560 2555
2561int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, 2556int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2562 unsigned int siga_error, const char *dbftext) 2557 const char *dbftext)
2563{ 2558{
2564 if (qdio_error || siga_error) { 2559 if (qdio_error) {
2565 QETH_DBF_TEXT(TRACE, 2, dbftext); 2560 QETH_DBF_TEXT(TRACE, 2, dbftext);
2566 QETH_DBF_TEXT(QERR, 2, dbftext); 2561 QETH_DBF_TEXT(QERR, 2, dbftext);
2567 QETH_DBF_TEXT_(QERR, 2, " F15=%02X", 2562 QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
@@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2569 QETH_DBF_TEXT_(QERR, 2, " F14=%02X", 2564 QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
2570 buf->element[14].flags & 0xff); 2565 buf->element[14].flags & 0xff);
2571 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); 2566 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
2572 QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error);
2573 return 1; 2567 return 1;
2574 } 2568 }
2575 return 0; 2569 return 0;
@@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
2622 card->perf_stats.inbound_do_qdio_start_time = 2616 card->perf_stats.inbound_do_qdio_start_time =
2623 qeth_get_micros(); 2617 qeth_get_micros();
2624 } 2618 }
2625 rc = do_QDIO(CARD_DDEV(card), 2619 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
2626 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, 2620 queue->next_buf_to_init, count);
2627 0, queue->next_buf_to_init, count, NULL);
2628 if (card->options.performance_stats) 2621 if (card->options.performance_stats)
2629 card->perf_stats.inbound_do_qdio_time += 2622 card->perf_stats.inbound_do_qdio_time +=
2630 qeth_get_micros() - 2623 qeth_get_micros() -
@@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
2643EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); 2636EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2644 2637
2645static int qeth_handle_send_error(struct qeth_card *card, 2638static int qeth_handle_send_error(struct qeth_card *card,
2646 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err, 2639 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2647 unsigned int siga_err)
2648{ 2640{
2649 int sbalf15 = buffer->buffer->element[15].flags & 0xff; 2641 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2650 int cc = siga_err & 3; 2642 int cc = qdio_err & 3;
2651 2643
2652 QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); 2644 QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
2653 qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr"); 2645 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
2654 switch (cc) { 2646 switch (cc) {
2655 case 0: 2647 case 0:
2656 if (qdio_err) { 2648 if (qdio_err) {
@@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
2662 } 2654 }
2663 return QETH_SEND_ERROR_NONE; 2655 return QETH_SEND_ERROR_NONE;
2664 case 2: 2656 case 2:
2665 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { 2657 if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
2666 QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B"); 2658 QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
2667 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 2659 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2668 return QETH_SEND_ERROR_KICK_IT; 2660 return QETH_SEND_ERROR_KICK_IT;
@@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2758 return 0; 2750 return 0;
2759} 2751}
2760 2752
2761static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, 2753static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2762 int index, int count) 2754 int count)
2763{ 2755{
2764 struct qeth_qdio_out_buffer *buf; 2756 struct qeth_qdio_out_buffer *buf;
2765 int rc; 2757 int rc;
@@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2807 qeth_get_micros(); 2799 qeth_get_micros();
2808 } 2800 }
2809 qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 2801 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
2810 if (under_int)
2811 qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
2812 if (atomic_read(&queue->set_pci_flags_count)) 2802 if (atomic_read(&queue->set_pci_flags_count))
2813 qdio_flags |= QDIO_FLAG_PCI_OUT; 2803 qdio_flags |= QDIO_FLAG_PCI_OUT;
2814 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, 2804 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
2815 queue->queue_no, index, count, NULL); 2805 queue->queue_no, index, count);
2816 if (queue->card->options.performance_stats) 2806 if (queue->card->options.performance_stats)
2817 queue->card->perf_stats.outbound_do_qdio_time += 2807 queue->card->perf_stats.outbound_do_qdio_time +=
2818 qeth_get_micros() - 2808 qeth_get_micros() -
@@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2866 queue->card->perf_stats.bufs_sent_pack += 2856 queue->card->perf_stats.bufs_sent_pack +=
2867 flush_cnt; 2857 flush_cnt;
2868 if (flush_cnt) 2858 if (flush_cnt)
2869 qeth_flush_buffers(queue, 1, index, flush_cnt); 2859 qeth_flush_buffers(queue, index, flush_cnt);
2870 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 2860 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2871 } 2861 }
2872 } 2862 }
2873} 2863}
2874 2864
2875void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, 2865void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2876 unsigned int qdio_error, unsigned int siga_error, 2866 unsigned int qdio_error, int __queue, int first_element,
2877 unsigned int __queue, int first_element, int count, 2867 int count, unsigned long card_ptr)
2878 unsigned long card_ptr)
2879{ 2868{
2880 struct qeth_card *card = (struct qeth_card *) card_ptr; 2869 struct qeth_card *card = (struct qeth_card *) card_ptr;
2881 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 2870 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
@@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
2883 int i; 2872 int i;
2884 2873
2885 QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); 2874 QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
2886 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 2875 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
2887 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 2876 QETH_DBF_TEXT(TRACE, 2, "achkcond");
2888 QETH_DBF_TEXT(TRACE, 2, "achkcond"); 2877 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
2889 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); 2878 netif_stop_queue(card->dev);
2890 QETH_DBF_TEXT_(TRACE, 2, "%08x", status); 2879 qeth_schedule_recovery(card);
2891 netif_stop_queue(card->dev); 2880 return;
2892 qeth_schedule_recovery(card);
2893 return;
2894 }
2895 } 2881 }
2896 if (card->options.performance_stats) { 2882 if (card->options.performance_stats) {
2897 card->perf_stats.outbound_handler_cnt++; 2883 card->perf_stats.outbound_handler_cnt++;
@@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
2901 for (i = first_element; i < (first_element + count); ++i) { 2887 for (i = first_element; i < (first_element + count); ++i) {
2902 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2888 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2903 /*we only handle the KICK_IT error by doing a recovery */ 2889 /*we only handle the KICK_IT error by doing a recovery */
2904 if (qeth_handle_send_error(card, buffer, 2890 if (qeth_handle_send_error(card, buffer, qdio_error)
2905 qdio_error, siga_error)
2906 == QETH_SEND_ERROR_KICK_IT){ 2891 == QETH_SEND_ERROR_KICK_IT){
2907 netif_stop_queue(card->dev); 2892 netif_stop_queue(card->dev);
2908 qeth_schedule_recovery(card); 2893 qeth_schedule_recovery(card);
@@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3164 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3149 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3165 if (ctx == NULL) { 3150 if (ctx == NULL) {
3166 qeth_fill_buffer(queue, buffer, skb); 3151 qeth_fill_buffer(queue, buffer, skb);
3167 qeth_flush_buffers(queue, 0, index, 1); 3152 qeth_flush_buffers(queue, index, 1);
3168 } else { 3153 } else {
3169 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index); 3154 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
3170 WARN_ON(buffers_needed != flush_cnt); 3155 WARN_ON(buffers_needed != flush_cnt);
3171 qeth_flush_buffers(queue, 0, index, flush_cnt); 3156 qeth_flush_buffers(queue, index, flush_cnt);
3172 } 3157 }
3173 return 0; 3158 return 0;
3174out: 3159out:
@@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3221 * again */ 3206 * again */
3222 if (atomic_read(&buffer->state) != 3207 if (atomic_read(&buffer->state) !=
3223 QETH_QDIO_BUF_EMPTY){ 3208 QETH_QDIO_BUF_EMPTY){
3224 qeth_flush_buffers(queue, 0, 3209 qeth_flush_buffers(queue, start_index,
3225 start_index, flush_count); 3210 flush_count);
3226 atomic_set(&queue->state, 3211 atomic_set(&queue->state,
3227 QETH_OUT_Q_UNLOCKED); 3212 QETH_OUT_Q_UNLOCKED);
3228 return -EBUSY; 3213 return -EBUSY;
@@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3253 flush_count += tmp; 3238 flush_count += tmp;
3254out: 3239out:
3255 if (flush_count) 3240 if (flush_count)
3256 qeth_flush_buffers(queue, 0, start_index, flush_count); 3241 qeth_flush_buffers(queue, start_index, flush_count);
3257 else if (!atomic_read(&queue->set_pci_flags_count)) 3242 else if (!atomic_read(&queue->set_pci_flags_count))
3258 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); 3243 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3259 /* 3244 /*
@@ -3274,7 +3259,7 @@ out:
3274 if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) 3259 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
3275 flush_count += qeth_flush_buffers_on_no_pci(queue); 3260 flush_count += qeth_flush_buffers_on_no_pci(queue);
3276 if (flush_count) 3261 if (flush_count)
3277 qeth_flush_buffers(queue, 0, start_index, flush_count); 3262 qeth_flush_buffers(queue, start_index, flush_count);
3278 } 3263 }
3279 /* at this point the queue is UNLOCKED again */ 3264 /* at this point the queue is UNLOCKED again */
3280 if (queue->card->options.performance_stats && do_pack) 3265 if (queue->card->options.performance_stats && do_pack)
@@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
3686 init_data.q_format = qeth_get_qdio_q_format(card); 3671 init_data.q_format = qeth_get_qdio_q_format(card);
3687 init_data.qib_param_field_format = 0; 3672 init_data.qib_param_field_format = 0;
3688 init_data.qib_param_field = qib_param_field; 3673 init_data.qib_param_field = qib_param_field;
3689 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3690 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3691 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3692 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3693 init_data.no_input_qs = 1; 3674 init_data.no_input_qs = 1;
3694 init_data.no_output_qs = card->qdio.no_out_queues; 3675 init_data.no_output_qs = card->qdio.no_out_queues;
3695 init_data.input_handler = card->discipline.input_handler; 3676 init_data.input_handler = card->discipline.input_handler;
@@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3751 3732
3752int qeth_core_hardsetup_card(struct qeth_card *card) 3733int qeth_core_hardsetup_card(struct qeth_card *card)
3753{ 3734{
3735 struct qdio_ssqd_desc *qdio_ssqd;
3754 int retries = 3; 3736 int retries = 3;
3755 int mpno; 3737 int mpno = 0;
3756 int rc; 3738 int rc;
3757 3739
3758 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3740 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3784,7 +3766,10 @@ retry:
3784 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3766 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3785 return rc; 3767 return rc;
3786 } 3768 }
3787 mpno = qdio_get_ssqd_pct(CARD_DDEV(card)); 3769
3770 qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card));
3771 if (qdio_ssqd)
3772 mpno = qdio_ssqd->pcnt;
3788 if (mpno) 3773 if (mpno)
3789 mpno = min(mpno - 1, QETH_MAX_PORTNO); 3774 mpno = min(mpno - 1, QETH_MAX_PORTNO);
3790 if (card->info.portno > mpno) { 3775 if (card->info.portno > mpno) {
@@ -4092,7 +4077,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4092 4077
4093 rc = qeth_determine_card_type(card); 4078 rc = qeth_determine_card_type(card);
4094 if (rc) { 4079 if (rc) {
4095 PRINT_WARN("%s: not a valid card type\n", __func__);
4096 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 4080 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4097 goto err_card; 4081 goto err_card;
4098 } 4082 }
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f682f7b14480..3fbc3bdec0c5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -726,8 +726,7 @@ tx_drop:
726} 726}
727 727
728static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, 728static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
729 unsigned int status, unsigned int qdio_err, 729 unsigned int qdio_err, unsigned int queue,
730 unsigned int siga_err, unsigned int queue,
731 int first_element, int count, unsigned long card_ptr) 730 int first_element, int count, unsigned long card_ptr)
732{ 731{
733 struct net_device *net_dev; 732 struct net_device *net_dev;
@@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
742 card->perf_stats.inbound_cnt++; 741 card->perf_stats.inbound_cnt++;
743 card->perf_stats.inbound_start_time = qeth_get_micros(); 742 card->perf_stats.inbound_start_time = qeth_get_micros();
744 } 743 }
745 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 744 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
746 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 745 QETH_DBF_TEXT(TRACE, 1, "qdinchk");
747 QETH_DBF_TEXT(TRACE, 1, "qdinchk"); 746 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
748 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 747 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
749 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, 748 count);
750 count); 749 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
751 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); 750 qeth_schedule_recovery(card);
752 qeth_schedule_recovery(card); 751 return;
753 return;
754 }
755 } 752 }
756 for (i = first_element; i < (first_element + count); ++i) { 753 for (i = first_element; i < (first_element + count); ++i) {
757 index = i % QDIO_MAX_BUFFERS_PER_Q; 754 index = i % QDIO_MAX_BUFFERS_PER_Q;
758 buffer = &card->qdio.in_q->bufs[index]; 755 buffer = &card->qdio.in_q->bufs[index];
759 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && 756 if (!(qdio_err &&
760 qeth_check_qdio_errors(buffer->buffer, 757 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
761 qdio_err, siga_err, "qinerr")))
762 qeth_l2_process_inbound_buffer(card, buffer, index); 758 qeth_l2_process_inbound_buffer(card, buffer, index);
763 /* clear buffer and give back to hardware */ 759 /* clear buffer and give back to hardware */
764 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 760 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 85be40abdda9..b29afef5c7fb 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -944,15 +944,8 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
944 else 944 else
945 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, 945 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
946 addr->del_flags); 946 addr->del_flags);
947 if (rc) { 947 if (rc)
948 QETH_DBF_TEXT(TRACE, 2, "failed"); 948 QETH_DBF_TEXT(TRACE, 2, "failed");
949 /* TODO: re-activate this warning as soon as we have a
 950 * clean micro code
951 qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
952 PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
953 buf, rc);
954 */
955 }
956 949
957 return rc; 950 return rc;
958} 951}
@@ -2946,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2946} 2939}
2947 2940
2948static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, 2941static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2949 unsigned int status, unsigned int qdio_err, 2942 unsigned int qdio_err, unsigned int queue, int first_element,
2950 unsigned int siga_err, unsigned int queue, int first_element,
2951 int count, unsigned long card_ptr) 2943 int count, unsigned long card_ptr)
2952{ 2944{
2953 struct net_device *net_dev; 2945 struct net_device *net_dev;
@@ -2962,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2962 card->perf_stats.inbound_cnt++; 2954 card->perf_stats.inbound_cnt++;
2963 card->perf_stats.inbound_start_time = qeth_get_micros(); 2955 card->perf_stats.inbound_start_time = qeth_get_micros();
2964 } 2956 }
2965 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 2957 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
2966 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 2958 QETH_DBF_TEXT(TRACE, 1, "qdinchk");
2967 QETH_DBF_TEXT(TRACE, 1, "qdinchk"); 2959 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2968 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 2960 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
2969 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", 2961 first_element, count);
2970 first_element, count); 2962 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
2971 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); 2963 qeth_schedule_recovery(card);
2972 qeth_schedule_recovery(card); 2964 return;
2973 return;
2974 }
2975 } 2965 }
2976 for (i = first_element; i < (first_element + count); ++i) { 2966 for (i = first_element; i < (first_element + count); ++i) {
2977 index = i % QDIO_MAX_BUFFERS_PER_Q; 2967 index = i % QDIO_MAX_BUFFERS_PER_Q;
2978 buffer = &card->qdio.in_q->bufs[index]; 2968 buffer = &card->qdio.in_q->bufs[index];
2979 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && 2969 if (!(qdio_err &&
2980 qeth_check_qdio_errors(buffer->buffer, 2970 qeth_check_qdio_errors(buffer->buffer,
2981 qdio_err, siga_err, "qinerr"))) 2971 qdio_err, "qinerr")))
2982 qeth_l3_process_inbound_buffer(card, buffer, index); 2972 qeth_l3_process_inbound_buffer(card, buffer, index);
2983 /* clear buffer and give back to hardware */ 2973 /* clear buffer and give back to hardware */
2984 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 2974 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
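
Both the layer-2 and layer-3 input handlers walk the inbound buffers the same way: first_element + count may run past the end of the ring, so the slot index wraps modulo the ring size. A tiny standalone illustration; QDIO_MAX_BUFFERS_PER_Q is 128 in the qdio headers.

/* Ring-index wrap as used in the qdio input handlers above. */
#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

int main(void)
{
        int first_element = 125, count = 6, i;

        for (i = first_element; i < first_element + count; i++)
                printf("process ring slot %d\n",
                       i % QDIO_MAX_BUFFERS_PER_Q); /* 125..127, 0..2 */
        return 0;
}
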
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 8735a415a116..164e090c2625 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -156,11 +156,8 @@ static int __init smsg_init(void)
156 if (rc != 0) 156 if (rc != 0)
157 goto out; 157 goto out;
158 rc = iucv_register(&smsg_handler, 1); 158 rc = iucv_register(&smsg_handler, 1);
159 if (rc) { 159 if (rc)
160 printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
161 rc = -EIO; /* better errno ? */
162 goto out_driver; 160 goto out_driver;
163 }
164 smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); 161 smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
165 if (!smsg_path) { 162 if (!smsg_path) {
166 rc = -ENOMEM; 163 rc = -ENOMEM;
@@ -168,11 +165,8 @@ static int __init smsg_init(void)
168 } 165 }
169 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", 166 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
170 NULL, NULL, NULL); 167 NULL, NULL, NULL);
171 if (rc) { 168 if (rc)
172 printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
173 rc = -EIO; /* better errno ? */
174 goto out_free; 169 goto out_free;
175 }
176 cpcmd("SET SMSG IUCV", NULL, 0, NULL); 170 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
177 return 0; 171 return 0;
178 172
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5bfbe7659830..834e9ee7e934 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -2,10 +2,10 @@
2 * drivers/s390/s390mach.c 2 * drivers/s390/s390mach.c
3 * S/390 machine check handler 3 * S/390 machine check handler
4 * 4 *
5 * S390 version 5 * Copyright IBM Corp. 2000,2008
6 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
@@ -18,10 +18,6 @@
18#include <asm/etr.h> 18#include <asm/etr.h>
19#include <asm/lowcore.h> 19#include <asm/lowcore.h>
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include "cio/cio.h"
22#include "cio/chsc.h"
23#include "cio/css.h"
24#include "cio/chp.h"
25#include "s390mach.h" 21#include "s390mach.h"
26 22
27static struct semaphore m_sem; 23static struct semaphore m_sem;
@@ -36,13 +32,40 @@ s390_handle_damage(char *msg)
36 for(;;); 32 for(;;);
37} 33}
38 34
35static crw_handler_t crw_handlers[NR_RSCS];
36
37/**
38 * s390_register_crw_handler() - register a channel report word handler
39 * @rsc: reporting source code to handle
40 * @handler: handler to be registered
41 *
42 * Returns %0 on success and a negative error value otherwise.
43 */
44int s390_register_crw_handler(int rsc, crw_handler_t handler)
45{
46 if ((rsc < 0) || (rsc >= NR_RSCS))
47 return -EINVAL;
48 if (!cmpxchg(&crw_handlers[rsc], NULL, handler))
49 return 0;
50 return -EBUSY;
51}
52
53/**
54 * s390_unregister_crw_handler() - unregister a channel report word handler
55 * @rsc: reporting source code to handle
56 */
57void s390_unregister_crw_handler(int rsc)
58{
59 if ((rsc < 0) || (rsc >= NR_RSCS))
60 return;
61 xchg(&crw_handlers[rsc], NULL);
62 synchronize_sched();
63}
64
39/* 65/*
40 * Retrieve CRWs and call function to handle event. 66 * Retrieve CRWs and call function to handle event.
41 *
 42 * Note: we currently process CRWs for io and chsc subchannels only
43 */ 67 */
44static int 68static int s390_collect_crw_info(void *param)
45s390_collect_crw_info(void *param)
46{ 69{
47 struct crw crw[2]; 70 struct crw crw[2];
48 int ccode; 71 int ccode;
@@ -84,57 +107,24 @@ repeat:
84 crw[chain].rsid); 107 crw[chain].rsid);
85 /* Check for overflows. */ 108 /* Check for overflows. */
86 if (crw[chain].oflw) { 109 if (crw[chain].oflw) {
110 int i;
111
87 pr_debug("%s: crw overflow detected!\n", __func__); 112 pr_debug("%s: crw overflow detected!\n", __func__);
88 css_schedule_eval_all(); 113 for (i = 0; i < NR_RSCS; i++) {
114 if (crw_handlers[i])
115 crw_handlers[i](NULL, NULL, 1);
116 }
89 chain = 0; 117 chain = 0;
90 continue; 118 continue;
91 } 119 }
92 switch (crw[chain].rsc) { 120 if (crw[0].chn && !chain) {
93 case CRW_RSC_SCH: 121 chain++;
94 if (crw[0].chn && !chain) 122 continue;
95 break;
96 pr_debug("source is subchannel %04X\n", crw[0].rsid);
97 css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
98 break;
99 case CRW_RSC_MONITOR:
100 pr_debug("source is monitoring facility\n");
101 break;
102 case CRW_RSC_CPATH:
103 pr_debug("source is channel path %02X\n", crw[0].rsid);
104 /*
105 * Check for solicited machine checks. These are
106 * created by reset channel path and need not be
107 * reported to the common I/O layer.
108 */
109 if (crw[chain].slct) {
110 pr_debug("solicited machine check for "
111 "channel path %02X\n", crw[0].rsid);
112 break;
113 }
114 switch (crw[0].erc) {
115 case CRW_ERC_IPARM: /* Path has come. */
116 chp_process_crw(crw[0].rsid, 1);
117 break;
118 case CRW_ERC_PERRI: /* Path has gone. */
119 case CRW_ERC_PERRN:
120 chp_process_crw(crw[0].rsid, 0);
121 break;
122 default:
123 pr_debug("Don't know how to handle erc=%x\n",
124 crw[0].erc);
125 }
126 break;
127 case CRW_RSC_CONFIG:
128 pr_debug("source is configuration-alert facility\n");
129 break;
130 case CRW_RSC_CSS:
131 pr_debug("source is channel subsystem\n");
132 chsc_process_crw();
133 break;
134 default:
135 pr_debug("unknown source\n");
136 break;
137 } 123 }
124 if (crw_handlers[crw[chain].rsc])
125 crw_handlers[crw[chain].rsc](&crw[0],
126 chain ? &crw[1] : NULL,
127 0);
138 /* chain is always 0 or 1 here. */ 128 /* chain is always 0 or 1 here. */
139 chain = crw[chain].chn ? chain + 1 : 0; 129 chain = crw[chain].chn ? chain + 1 : 0;
140 } 130 }
@@ -468,6 +458,10 @@ s390_do_machine_check(struct pt_regs *regs)
468 etr_sync_check(); 458 etr_sync_check();
469 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) 459 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
470 etr_switch_to_local(); 460 etr_switch_to_local();
461 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
462 stp_sync_check();
463 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
464 stp_island_check();
471 } 465 }
472 466
473 if (mci->se) 467 if (mci->se)
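
The machine-check hunks above swap the hard-coded switch on the reporting source code for a table of registered handlers: a slot is claimed with cmpxchg so registration only succeeds while the slot is empty, and unregistration clears it and waits out in-flight readers. A standalone model of the claim rule using C11 atomics; the return codes and handler arguments are simplified stand-ins.

/* Model of s390_register_crw_handler(): claim a per-RSC slot only
 * if it is currently empty, atomically. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_RSCS 16

typedef void (*crw_handler_t)(void *crw0, void *crw1, int overflow);

static _Atomic(crw_handler_t) crw_handlers[NR_RSCS];

static int register_crw_handler(int rsc, crw_handler_t handler)
{
        crw_handler_t expected = NULL;

        if (rsc < 0 || rsc >= NR_RSCS)
                return -1;              /* -EINVAL in the kernel */
        if (atomic_compare_exchange_strong(&crw_handlers[rsc],
                                           &expected, handler))
                return 0;
        return -2;                      /* slot taken: -EBUSY */
}

static void demo_handler(void *a, void *b, int overflow)
{
        (void)a; (void)b;
        printf("crw: overflow=%d\n", overflow);
}

int main(void)
{
        printf("first:  %d\n", register_crw_handler(3, demo_handler)); /* 0 */
        printf("second: %d\n", register_crw_handler(3, demo_handler)); /* -2 */
        return 0;
}
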
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index ca681f9b67fc..d39f8b697d27 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -72,6 +72,13 @@ struct crw {
72 __u32 rsid : 16; /* reporting-source ID */ 72 __u32 rsid : 16; /* reporting-source ID */
73} __attribute__ ((packed)); 73} __attribute__ ((packed));
74 74
75typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
76
77extern int s390_register_crw_handler(int rsc, crw_handler_t handler);
78extern void s390_unregister_crw_handler(int rsc);
79
80#define NR_RSCS 16
81
75#define CRW_RSC_MONITOR 0x2 /* monitoring facility */ 82#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
76#define CRW_RSC_SCH 0x3 /* subchannel */ 83#define CRW_RSC_SCH 0x3 /* subchannel */
77#define CRW_RSC_CPATH 0x4 /* channel path */ 84#define CRW_RSC_CPATH 0x4 /* channel path */
@@ -105,6 +112,9 @@ static inline int stcrw(struct crw *pcrw )
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */ 112#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ 113#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107 114
115#define ED_STP_SYNC 7 /* External damage STP sync check */
116#define ED_STP_ISLAND 6 /* External damage STP island check */
117
108struct pt_regs; 118struct pt_regs;
109 119
110void s390_handle_mcck(void); 120void s390_handle_mcck(void);
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index d6a78f1a2f16..cb301cc6178c 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -3,7 +3,6 @@
3# 3#
4 4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ 5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \ 6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o
7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o
8 7
9obj-$(CONFIG_ZFCP) += zfcp.o 8obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 8c7e2b778ef1..90abfd06ed55 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -1,22 +1,9 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Module interface and handling of zfcp data structures.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22/* 9/*
@@ -31,93 +18,25 @@
31 * Maxim Shchetynin 18 * Maxim Shchetynin
32 * Volker Sameske 19 * Volker Sameske
33 * Ralph Wuerthner 20 * Ralph Wuerthner
21 * Michael Loehr
22 * Swen Schillig
23 * Christof Schmitt
24 * Martin Petermann
25 * Sven Schuetz
34 */ 26 */
35 27
28#include <linux/miscdevice.h>
36#include "zfcp_ext.h" 29#include "zfcp_ext.h"
37 30
38/* accumulated log level (module parameter) */
39static u32 loglevel = ZFCP_LOG_LEVEL_DEFAULTS;
40static char *device; 31static char *device;
41/*********************** FUNCTION PROTOTYPES *********************************/
42
43/* written against the module interface */
44static int __init zfcp_module_init(void);
45
46/* FCP related */
47static void zfcp_ns_gid_pn_handler(unsigned long);
48
49/* miscellaneous */
50static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
51static void zfcp_sg_list_free(struct zfcp_sg_list *);
52static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
53 void __user *, size_t);
54static int zfcp_sg_list_copy_to_user(void __user *,
55 struct zfcp_sg_list *, size_t);
56static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
57
58#define ZFCP_CFDC_IOC_MAGIC 0xDD
59#define ZFCP_CFDC_IOC \
60 _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_sense_data)
61
62
63static const struct file_operations zfcp_cfdc_fops = {
64 .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
65#ifdef CONFIG_COMPAT
66 .compat_ioctl = zfcp_cfdc_dev_ioctl
67#endif
68};
69
70static struct miscdevice zfcp_cfdc_misc = {
71 .minor = ZFCP_CFDC_DEV_MINOR,
72 .name = ZFCP_CFDC_DEV_NAME,
73 .fops = &zfcp_cfdc_fops
74};
75
76/*********************** KERNEL/MODULE PARAMETERS ***************************/
77
78/* declare driver module init/cleanup functions */
79module_init(zfcp_module_init);
80 32
81MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); 33MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
82MODULE_DESCRIPTION 34MODULE_DESCRIPTION("FCP HBA driver");
83 ("FCP (SCSI over Fibre Channel) HBA driver for IBM System z9 and zSeries");
84MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
85 36
86module_param(device, charp, 0400); 37module_param(device, charp, 0400);
87MODULE_PARM_DESC(device, "specify initial device"); 38MODULE_PARM_DESC(device, "specify initial device");
88 39
89module_param(loglevel, uint, 0400);
90MODULE_PARM_DESC(loglevel,
91 "log levels, 8 nibbles: "
92 "FC ERP QDIO CIO Config FSF SCSI Other, "
93 "levels: 0=none 1=normal 2=devel 3=trace");
94
95/****************************************************************/
96/************** Functions without logging ***********************/
97/****************************************************************/
98
99void
100_zfcp_hex_dump(char *addr, int count)
101{
102 int i;
103 for (i = 0; i < count; i++) {
104 printk("%02x", addr[i]);
105 if ((i % 4) == 3)
106 printk(" ");
107 if ((i % 32) == 31)
108 printk("\n");
109 }
110 if (((i-1) % 32) != 31)
111 printk("\n");
112}
113
114
115/****************************************************************/
116/****** Functions to handle the request ID hash table ********/
117/****************************************************************/
118
119#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
120
121static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) 40static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
122{ 41{
123 int idx; 42 int idx;
@@ -132,11 +51,12 @@ static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
132 return 0; 51 return 0;
133} 52}
134 53
135static void zfcp_reqlist_free(struct zfcp_adapter *adapter) 54/**
136{ 55 * zfcp_reqlist_isempty - is the request list empty
137 kfree(adapter->req_list); 56 * @adapter: pointer to struct zfcp_adapter
138} 57 *
139 58 * Returns: true if list is empty, false otherwise
59 */
140int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) 60int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
141{ 61{
142 unsigned int idx; 62 unsigned int idx;
@@ -147,62 +67,58 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
147 return 1; 67 return 1;
148} 68}
149 69
150#undef ZFCP_LOG_AREA 70static int __init zfcp_device_setup(char *devstr)
151
152/****************************************************************/
153/************** Uncategorised Functions *************************/
154/****************************************************************/
155
156#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
157
158/**
159 * zfcp_device_setup - setup function
160 * @str: pointer to parameter string
161 *
162 * Parse "device=..." parameter string.
163 */
164static int __init
165zfcp_device_setup(char *devstr)
166{ 71{
167 char *tmp, *str; 72 char *token;
168 size_t len; 73 char *str;
169 74
170 if (!devstr) 75 if (!devstr)
171 return 0; 76 return 0;
172 77
 173 len = strlen(devstr) + 1; 78 /* duplicate devstr and keep the original for sysfs presentation */
174 str = kmalloc(len, GFP_KERNEL); 79 str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
175 if (!str) 80 if (!str)
176 goto err_out; 81 return 0;
177 memcpy(str, devstr, len);
178 82
179 tmp = strchr(str, ','); 83 strcpy(str, devstr);
180 if (!tmp)
181 goto err_out;
182 *tmp++ = '\0';
183 strncpy(zfcp_data.init_busid, str, BUS_ID_SIZE);
184 zfcp_data.init_busid[BUS_ID_SIZE-1] = '\0';
185 84
186 zfcp_data.init_wwpn = simple_strtoull(tmp, &tmp, 0); 85 token = strsep(&str, ",");
187 if (*tmp++ != ',') 86 if (!token || strlen(token) >= BUS_ID_SIZE)
188 goto err_out; 87 goto err_out;
189 if (*tmp == '\0') 88 strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
89
90 token = strsep(&str, ",");
91 if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn))
190 goto err_out; 92 goto err_out;
191 93
192 zfcp_data.init_fcp_lun = simple_strtoull(tmp, &tmp, 0); 94 token = strsep(&str, ",");
193 if (*tmp != '\0') 95 if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun))
194 goto err_out; 96 goto err_out;
97
195 kfree(str); 98 kfree(str);
196 return 1; 99 return 1;
197 100
198 err_out: 101 err_out:
199 ZFCP_LOG_NORMAL("Parse error for device parameter string %s\n", str);
200 kfree(str); 102 kfree(str);
103 pr_err("zfcp: Parse error for device parameter string %s, "
104 "device not attached.\n", devstr);
201 return 0; 105 return 0;
202} 106}
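
The rewritten zfcp_device_setup() above replaces hand-rolled pointer arithmetic with strsep() over a duplicated string, parsing busid, wwpn, and lun in order. A standalone sketch of the same parse; BUS_ID_SIZE = 20 and the sample device string are assumptions.

/* strsep-based parse of a "busid,wwpn,lun" device string, in the
 * style of the new zfcp_device_setup(). */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUS_ID_SIZE 20  /* assumed */

static int parse_device(const char *devstr, char *busid,
                        unsigned long long *wwpn,
                        unsigned long long *lun)
{
        char *str = strdup(devstr);     /* keep the original intact */
        char *walk = str;
        char *token;
        int rc = -1;

        if (!str)
                return -1;

        token = strsep(&walk, ",");
        if (!token || strlen(token) >= BUS_ID_SIZE)
                goto out;
        strcpy(busid, token);

        token = strsep(&walk, ",");
        if (!token)
                goto out;
        *wwpn = strtoull(token, NULL, 0);

        token = strsep(&walk, ",");
        if (!token)
                goto out;
        *lun = strtoull(token, NULL, 0);
        rc = 0;
out:
        free(str);
        return rc;
}

int main(void)
{
        char busid[BUS_ID_SIZE];
        unsigned long long wwpn, lun;

        if (!parse_device("0.0.4711,0x5005076300c213e9,0x0",
                          busid, &wwpn, &lun))
                printf("%s wwpn=0x%llx lun=0x%llx\n", busid, wwpn, lun);
        return 0;
}
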
203 107
204static void __init 108static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id)
205zfcp_init_device_configure(void) 109{
110 struct zfcp_adapter *adapter;
111
112 list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list)
113 if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id,
114 BUS_ID_SIZE) == 0) &&
115 !(atomic_read(&adapter->status) &
116 ZFCP_STATUS_COMMON_REMOVE))
117 return adapter;
118 return NULL;
119}
120
121static void __init zfcp_init_device_configure(void)
206{ 122{
207 struct zfcp_adapter *adapter; 123 struct zfcp_adapter *adapter;
208 struct zfcp_port *port; 124 struct zfcp_port *port;
@@ -215,101 +131,75 @@ zfcp_init_device_configure(void)
215 zfcp_adapter_get(adapter); 131 zfcp_adapter_get(adapter);
216 read_unlock_irq(&zfcp_data.config_lock); 132 read_unlock_irq(&zfcp_data.config_lock);
217 133
218 if (adapter == NULL) 134 if (!adapter)
219 goto out_adapter; 135 goto out_adapter;
220 port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0); 136 port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0);
221 if (!port) 137 if (IS_ERR(port))
222 goto out_port; 138 goto out_port;
223 unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun); 139 unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun);
224 if (!unit) 140 if (IS_ERR(unit))
225 goto out_unit; 141 goto out_unit;
226 up(&zfcp_data.config_sema); 142 up(&zfcp_data.config_sema);
227 ccw_device_set_online(adapter->ccw_device); 143 ccw_device_set_online(adapter->ccw_device);
228 zfcp_erp_wait(adapter); 144 zfcp_erp_wait(adapter);
229 down(&zfcp_data.config_sema); 145 down(&zfcp_data.config_sema);
230 zfcp_unit_put(unit); 146 zfcp_unit_put(unit);
231 out_unit: 147out_unit:
232 zfcp_port_put(port); 148 zfcp_port_put(port);
233 out_port: 149out_port:
234 zfcp_adapter_put(adapter); 150 zfcp_adapter_put(adapter);
235 out_adapter: 151out_adapter:
236 up(&zfcp_data.config_sema); 152 up(&zfcp_data.config_sema);
237 return; 153 return;
238} 154}
239 155
240static int calc_alignment(int size) 156static struct kmem_cache *zfcp_cache_create(int size, char *name)
241{ 157{
242 int align = 1; 158 int align = 1;
243
244 if (!size)
245 return 0;
246
247 while ((size - align) > 0) 159 while ((size - align) > 0)
248 align <<= 1; 160 align <<= 1;
249 161 return kmem_cache_create(name , size, align, 0, NULL);
250 return align;
251} 162}
252 163
253static int __init 164static int __init zfcp_module_init(void)
254zfcp_module_init(void)
255{ 165{
256 int retval = -ENOMEM; 166 int retval = -ENOMEM;
257 int size, align;
258 167
259 size = sizeof(struct zfcp_fsf_req_qtcb); 168 zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
260 align = calc_alignment(size); 169 sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
261 zfcp_data.fsf_req_qtcb_cache =
262 kmem_cache_create("zfcp_fsf", size, align, 0, NULL);
263 if (!zfcp_data.fsf_req_qtcb_cache) 170 if (!zfcp_data.fsf_req_qtcb_cache)
264 goto out; 171 goto out;
265 172
266 size = sizeof(struct fsf_status_read_buffer); 173 zfcp_data.sr_buffer_cache = zfcp_cache_create(
267 align = calc_alignment(size); 174 sizeof(struct fsf_status_read_buffer), "zfcp_sr");
268 zfcp_data.sr_buffer_cache =
269 kmem_cache_create("zfcp_sr", size, align, 0, NULL);
270 if (!zfcp_data.sr_buffer_cache) 175 if (!zfcp_data.sr_buffer_cache)
271 goto out_sr_cache; 176 goto out_sr_cache;
272 177
273 size = sizeof(struct zfcp_gid_pn_data); 178 zfcp_data.gid_pn_cache = zfcp_cache_create(
274 align = calc_alignment(size); 179 sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
275 zfcp_data.gid_pn_cache =
276 kmem_cache_create("zfcp_gid", size, align, 0, NULL);
277 if (!zfcp_data.gid_pn_cache) 180 if (!zfcp_data.gid_pn_cache)
278 goto out_gid_cache; 181 goto out_gid_cache;
279 182
280 atomic_set(&zfcp_data.loglevel, loglevel);
281
282 /* initialize adapter list */
283 INIT_LIST_HEAD(&zfcp_data.adapter_list_head); 183 INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
284
285 /* initialize adapters to be removed list head */
286 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh); 184 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
287 185
186 sema_init(&zfcp_data.config_sema, 1);
187 rwlock_init(&zfcp_data.config_lock);
188
288 zfcp_data.scsi_transport_template = 189 zfcp_data.scsi_transport_template =
289 fc_attach_transport(&zfcp_transport_functions); 190 fc_attach_transport(&zfcp_transport_functions);
290 if (!zfcp_data.scsi_transport_template) 191 if (!zfcp_data.scsi_transport_template)
291 goto out_transport; 192 goto out_transport;
292 193
293 retval = misc_register(&zfcp_cfdc_misc); 194 retval = misc_register(&zfcp_cfdc_misc);
294 if (retval != 0) { 195 if (retval) {
295 ZFCP_LOG_INFO("registration of misc device " 196 pr_err("zfcp: registration of misc device zfcp_cfdc failed\n");
296 "zfcp_cfdc failed\n");
297 goto out_misc; 197 goto out_misc;
298 } 198 }
299 199
300 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
301 ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
302
303 /* Initialise proc semaphores */
304 sema_init(&zfcp_data.config_sema, 1);
305
306 /* initialise configuration rw lock */
307 rwlock_init(&zfcp_data.config_lock);
308
309 /* setup dynamic I/O */
310 retval = zfcp_ccw_register(); 200 retval = zfcp_ccw_register();
311 if (retval) { 201 if (retval) {
312 ZFCP_LOG_NORMAL("registration with common I/O layer failed\n"); 202 pr_err("zfcp: Registration with common I/O layer failed.\n");
313 goto out_ccw_register; 203 goto out_ccw_register;
314 } 204 }
315 205
@@ -318,527 +208,88 @@ zfcp_module_init(void)
318 208
319 goto out; 209 goto out;
320 210
321 out_ccw_register: 211out_ccw_register:
322 misc_deregister(&zfcp_cfdc_misc); 212 misc_deregister(&zfcp_cfdc_misc);
323 out_misc: 213out_misc:
324 fc_release_transport(zfcp_data.scsi_transport_template); 214 fc_release_transport(zfcp_data.scsi_transport_template);
325 out_transport: 215out_transport:
326 kmem_cache_destroy(zfcp_data.gid_pn_cache); 216 kmem_cache_destroy(zfcp_data.gid_pn_cache);
327 out_gid_cache: 217out_gid_cache:
328 kmem_cache_destroy(zfcp_data.sr_buffer_cache); 218 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
329 out_sr_cache: 219out_sr_cache:
330 kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache); 220 kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
331 out: 221out:
332 return retval; 222 return retval;
333} 223}
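
zfcp_module_init() keeps the usual kernel goto-unwind shape: each failure jumps to a label that releases everything acquired so far, in reverse order of acquisition. A minimal standalone model of the idiom, with malloc() standing in for the caches and registrations.

/* goto-unwind error handling: undo in reverse order on failure;
 * on success the resources stay live, as for a loaded module. */
#include <stdio.h>
#include <stdlib.h>

static int module_init_demo(void)
{
        void *a, *b;
        int retval = -1;

        a = malloc(16);                 /* first resource */
        if (!a)
                goto out;
        b = malloc(16);                 /* second resource */
        if (!b)
                goto out_free_a;

        puts("init ok");
        return 0;

out_free_a:
        free(a);                        /* undo in reverse order */
out:
        return retval;
}

int main(void)
{
        return module_init_demo() ? EXIT_FAILURE : EXIT_SUCCESS;
}
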
334 224
335/* 225module_init(zfcp_module_init);
336 * function: zfcp_cfdc_dev_ioctl
337 *
338 * purpose: Handle control file upload/download transaction via IOCTL
339 * interface
340 *
 341 * returns: 0 - Operation completed successfully
342 * -ENOTTY - Unknown IOCTL command
343 * -EINVAL - Invalid sense data record
344 * -ENXIO - The FCP adapter is not available
345 * -EOPNOTSUPP - The FCP adapter does not have CFDC support
346 * -ENOMEM - Insufficient memory
347 * -EFAULT - User space memory I/O operation fault
348 * -EPERM - Cannot create or queue FSF request or create SBALs
 349 * -ERESTARTSYS - Received signal (is mapped to EAGAIN by VFS)
350 */
351static long
352zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
353 unsigned long buffer)
354{
355 struct zfcp_cfdc_sense_data *sense_data, __user *sense_data_user;
356 struct zfcp_adapter *adapter = NULL;
357 struct zfcp_fsf_req *fsf_req = NULL;
358 struct zfcp_sg_list *sg_list = NULL;
359 u32 fsf_command, option;
360 char *bus_id = NULL;
361 int retval = 0;
362
363 sense_data = kmalloc(sizeof(struct zfcp_cfdc_sense_data), GFP_KERNEL);
364 if (sense_data == NULL) {
365 retval = -ENOMEM;
366 goto out;
367 }
368
369 sg_list = kzalloc(sizeof(struct zfcp_sg_list), GFP_KERNEL);
370 if (sg_list == NULL) {
371 retval = -ENOMEM;
372 goto out;
373 }
374
375 if (command != ZFCP_CFDC_IOC) {
376 ZFCP_LOG_INFO("IOC request code 0x%x invalid\n", command);
377 retval = -ENOTTY;
378 goto out;
379 }
380
381 if ((sense_data_user = (void __user *) buffer) == NULL) {
382 ZFCP_LOG_INFO("sense data record is required\n");
383 retval = -EINVAL;
384 goto out;
385 }
386
387 retval = copy_from_user(sense_data, sense_data_user,
388 sizeof(struct zfcp_cfdc_sense_data));
389 if (retval) {
390 retval = -EFAULT;
391 goto out;
392 }
393
394 if (sense_data->signature != ZFCP_CFDC_SIGNATURE) {
395 ZFCP_LOG_INFO("invalid sense data request signature 0x%08x\n",
396 ZFCP_CFDC_SIGNATURE);
397 retval = -EINVAL;
398 goto out;
399 }
400
401 switch (sense_data->command) {
402
403 case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
404 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
405 option = FSF_CFDC_OPTION_NORMAL_MODE;
406 break;
407
408 case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
409 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
410 option = FSF_CFDC_OPTION_FORCE;
411 break;
412
413 case ZFCP_CFDC_CMND_FULL_ACCESS:
414 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
415 option = FSF_CFDC_OPTION_FULL_ACCESS;
416 break;
417
418 case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
419 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
420 option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
421 break;
422
423 case ZFCP_CFDC_CMND_UPLOAD:
424 fsf_command = FSF_QTCB_UPLOAD_CONTROL_FILE;
425 option = 0;
426 break;
427
428 default:
429 ZFCP_LOG_INFO("invalid command code 0x%08x\n",
430 sense_data->command);
431 retval = -EINVAL;
432 goto out;
433 }
434
435 bus_id = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
436 if (bus_id == NULL) {
437 retval = -ENOMEM;
438 goto out;
439 }
440 snprintf(bus_id, BUS_ID_SIZE, "%d.%d.%04x",
441 (sense_data->devno >> 24),
442 (sense_data->devno >> 16) & 0xFF,
443 (sense_data->devno & 0xFFFF));
444
445 read_lock_irq(&zfcp_data.config_lock);
446 adapter = zfcp_get_adapter_by_busid(bus_id);
447 if (adapter)
448 zfcp_adapter_get(adapter);
449 read_unlock_irq(&zfcp_data.config_lock);
450
451 kfree(bus_id);
452
453 if (adapter == NULL) {
454 ZFCP_LOG_INFO("invalid adapter\n");
455 retval = -ENXIO;
456 goto out;
457 }
458
459 if (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE) {
460 retval = zfcp_sg_list_alloc(sg_list,
461 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
462 if (retval) {
463 retval = -ENOMEM;
464 goto out;
465 }
466 }
467
468 if ((sense_data->command & ZFCP_CFDC_DOWNLOAD) &&
469 (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE)) {
470 retval = zfcp_sg_list_copy_from_user(
471 sg_list, &sense_data_user->control_file,
472 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
473 if (retval) {
474 retval = -EFAULT;
475 goto out;
476 }
477 }
478
479 retval = zfcp_fsf_control_file(adapter, &fsf_req, fsf_command,
480 option, sg_list);
481 if (retval)
482 goto out;
483
484 if ((fsf_req->qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
485 (fsf_req->qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
486 retval = -ENXIO;
487 goto out;
488 }
489
490 sense_data->fsf_status = fsf_req->qtcb->header.fsf_status;
491 memcpy(&sense_data->fsf_status_qual,
492 &fsf_req->qtcb->header.fsf_status_qual,
493 sizeof(union fsf_status_qual));
494 memcpy(&sense_data->payloads, &fsf_req->qtcb->bottom.support.els, 256);
495
496 retval = copy_to_user(sense_data_user, sense_data,
497 sizeof(struct zfcp_cfdc_sense_data));
498 if (retval) {
499 retval = -EFAULT;
500 goto out;
501 }
502
503 if (sense_data->command & ZFCP_CFDC_UPLOAD) {
504 retval = zfcp_sg_list_copy_to_user(
505 &sense_data_user->control_file, sg_list,
506 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
507 if (retval) {
508 retval = -EFAULT;
509 goto out;
510 }
511 }
512
513 out:
514 if (fsf_req != NULL)
515 zfcp_fsf_req_free(fsf_req);
516
517 if ((adapter != NULL) && (retval != -ENXIO))
518 zfcp_adapter_put(adapter);
519
520 if (sg_list != NULL) {
521 zfcp_sg_list_free(sg_list);
522 kfree(sg_list);
523 }
524
525 kfree(sense_data);
526
527 return retval;
528}
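For orientation, a user-space sketch of how this ioctl was driven (not part of
the patch; the device path and the header name are assumptions, the field and
constant names are taken from the code above):

	/* hypothetical caller of the CFDC control-file interface */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "zfcp_cfdc.h"	/* assumed: ZFCP_CFDC_* and the sense data layout */

	static int cfdc_upload(const char *dev, unsigned int devno)
	{
		struct zfcp_cfdc_sense_data sd;
		int fd, rc;

		memset(&sd, 0, sizeof(sd));
		sd.signature = ZFCP_CFDC_SIGNATURE;	/* validated by the driver */
		sd.command = ZFCP_CFDC_CMND_UPLOAD;
		sd.devno = devno;			/* c.s.devno encoding */

		fd = open(dev, O_RDWR);
		if (fd < 0)
			return -1;
		rc = ioctl(fd, ZFCP_CFDC_IOC, &sd);	/* errno values as listed above */
		close(fd);
		return rc;
	}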
529
530
531/**
532 * zfcp_sg_list_alloc - create a scatter-gather list of the specified size
533 * @sg_list: structure describing a scatter gather list
534 * @size: size of scatter-gather list
535 * Return: 0 on success, else -ENOMEM
536 *
537 * In sg_list->sg a pointer to the created scatter-gather list is returned,
538 * or NULL if we run out of memory. sg_list->count specifies the number of
539 * elements of the scatter-gather list. The maximum size of a single element
540 * in the scatter-gather list is PAGE_SIZE.
541 */
542static int
543zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
544{
545 struct scatterlist *sg;
546 unsigned int i;
547 int retval = 0;
548 void *address;
549
550 BUG_ON(sg_list == NULL);
551
552 sg_list->count = size >> PAGE_SHIFT;
553 if (size & ~PAGE_MASK)
554 sg_list->count++;
555 sg_list->sg = kcalloc(sg_list->count, sizeof(struct scatterlist),
556 GFP_KERNEL);
557 if (sg_list->sg == NULL) {
558 sg_list->count = 0;
559 retval = -ENOMEM;
560 goto out;
561 }
562 sg_init_table(sg_list->sg, sg_list->count);
563
564 for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) {
565 address = (void *) get_zeroed_page(GFP_KERNEL);
566 if (address == NULL) {
567 sg_list->count = i;
568 zfcp_sg_list_free(sg_list);
569 retval = -ENOMEM;
570 goto out;
571 }
572 zfcp_address_to_sg(address, sg, min(size, PAGE_SIZE));
573 size -= sg->length;
574 }
575
576 out:
577 return retval;
578}
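To make the sizing rule concrete (a stand-alone model of the computation, not
driver code): a request of 5000 bytes on 4 KiB pages yields two elements, the
second of length 904:

	#include <stdio.h>
	#define MY_PAGE_SHIFT 12
	#define MY_PAGE_SIZE  (1UL << MY_PAGE_SHIFT)
	#define MY_PAGE_MASK  (~(MY_PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long size = 5000, count, last;

		count = size >> MY_PAGE_SHIFT;		/* full pages: 1 */
		if (size & ~MY_PAGE_MASK)		/* remainder: 904 bytes */
			count++;
		last = size - (count - 1) * MY_PAGE_SIZE;
		printf("%lu elements, last length %lu\n", count, last);
		return 0;	/* prints: 2 elements, last length 904 */
	}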
579
580
581/**
582 * zfcp_sg_list_free - free memory of a scatter-gather list
583 * @sg_list: structure describing a scatter-gather list
584 *
585 * Memory for each element in the scatter-gather list is freed.
586 * Finally sg_list->sg is freed itself and sg_list->count is reset.
587 */
588static void
589zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
590{
591 struct scatterlist *sg;
592 unsigned int i;
593
594 BUG_ON(sg_list == NULL);
595
596 for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++)
597 free_page((unsigned long) zfcp_sg_to_address(sg));
598
599 sg_list->count = 0;
600 kfree(sg_list->sg);
601}
602
603/**
604 * zfcp_sg_size - determine size of a scatter-gather list
605 * @sg: array of (struct scatterlist)
606 * @sg_count: elements in array
607 * Return: size of entire scatter-gather list
608 */
609static size_t zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
610{
611 unsigned int i;
612 struct scatterlist *p;
613 size_t size;
614
615 size = 0;
616 for (i = 0, p = sg; i < sg_count; i++, p++) {
617 BUG_ON(p == NULL);
618 size += p->length;
619 }
620
621 return size;
622}
623
624
625/**
626 * zfcp_sg_list_copy_from_user - copy data from user space to scatter-gather list
627 * @sg_list: structure describing a scatter-gather list
628 * @user_buffer: pointer to buffer in user space
629 * @size: number of bytes to be copied
630 * Return: 0 on success, -EFAULT if copy_from_user fails.
631 */
632static int
633zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
634 void __user *user_buffer,
635 size_t size)
636{
637 struct scatterlist *sg;
638 unsigned int length;
639 void *zfcp_buffer;
640 int retval = 0;
641
642 BUG_ON(sg_list == NULL);
643
644 if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
645 return -EFAULT;
646
647 for (sg = sg_list->sg; size > 0; sg++) {
648 length = min((unsigned int)size, sg->length);
649 zfcp_buffer = zfcp_sg_to_address(sg);
650 if (copy_from_user(zfcp_buffer, user_buffer, length)) {
651 retval = -EFAULT;
652 goto out;
653 }
654 user_buffer += length;
655 size -= length;
656 }
657
658 out:
659 return retval;
660}
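Together with zfcp_sg_list_alloc() and zfcp_sg_list_free() this forms a small
lifecycle; a hedged sketch of the intended call order (the wrapper function is
illustrative only):

	/* pull one control file from user space into page-backed sg entries */
	static int cfdc_pull_from_user(void __user *user_buf, size_t len)
	{
		struct zfcp_sg_list sgl;
		int rc;

		rc = zfcp_sg_list_alloc(&sgl, len);	/* PAGE_SIZE chunks */
		if (rc)
			return rc;			/* -ENOMEM */
		rc = zfcp_sg_list_copy_from_user(&sgl, user_buf, len);
		zfcp_sg_list_free(&sgl);	/* frees the pages and the sg array */
		return rc;			/* 0 or -EFAULT */
	}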
661
662
663/**
664 * zfcp_sg_list_copy_to_user - copy data from scatter-gather list to user space
665 * @user_buffer: pointer to buffer in user space
666 * @sg_list: structure describing a scatter-gather list
667 * @size: number of bytes to be copied
668 * Return: 0 on success, -EFAULT if copy_to_user fails
669 */
670static int
671zfcp_sg_list_copy_to_user(void __user *user_buffer,
672 struct zfcp_sg_list *sg_list,
673 size_t size)
674{
675 struct scatterlist *sg;
676 unsigned int length;
677 void *zfcp_buffer;
678 int retval = 0;
679
680 BUG_ON(sg_list == NULL);
681
682 if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
683 return -EFAULT;
684
685 for (sg = sg_list->sg; size > 0; sg++) {
686 length = min((unsigned int) size, sg->length);
687 zfcp_buffer = zfcp_sg_to_address(sg);
688 if (copy_to_user(user_buffer, zfcp_buffer, length)) {
689 retval = -EFAULT;
690 goto out;
691 }
692 user_buffer += length;
693 size -= length;
694 }
695
696 out:
697 return retval;
698}
699
700
701#undef ZFCP_LOG_AREA
702
703/****************************************************************/
704/****** Functions for configuration/set-up of structures ********/
705/****************************************************************/
706
707#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
708 226
709 227	/**
710 228	 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
711 229	 * @port: pointer to port to search for unit
712 230	 * @fcp_lun: FCP LUN to search for
713	 * Traverse list of all units of a port and return pointer to a unit
714	 * with the given FCP LUN.
231	 *
232	 * Returns: pointer to zfcp_unit or NULL
715 233	 */
716	struct zfcp_unit *
717	zfcp_get_unit_by_lun(struct zfcp_port *port, fcp_lun_t fcp_lun)
234	struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
235					       fcp_lun_t fcp_lun)
718 236	{
719 237		struct zfcp_unit *unit;
720		int found = 0;
721 238
722	list_for_each_entry(unit, &port->unit_list_head, list) {
239	list_for_each_entry(unit, &port->unit_list_head, list)
723 240		if ((unit->fcp_lun == fcp_lun) &&
724		    !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status))
725		{
726			found = 1;
241		    !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE))
242			return unit;
243	return NULL;
727			break;
728		}
729	}
730	return found ? unit : NULL;
731 244	}
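Callers are expected to serialize against removal via config_lock and to pin
the result before dropping the lock; a short sketch of the pattern used
elsewhere in this file:

	struct zfcp_unit *unit;

	read_lock_irq(&zfcp_data.config_lock);
	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	if (unit)
		zfcp_unit_get(unit);	/* take a reference under the lock */
	read_unlock_irq(&zfcp_data.config_lock);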
732 245
733 246	/**
734 247	 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
735 248	 * @adapter: pointer to adapter to search for port
736 249	 * @wwpn: wwpn to search for
737	 * Traverse list of all ports of an adapter and return pointer to a port
738	 * with the given wwpn.
250	 *
251	 * Returns: pointer to zfcp_port or NULL
739 */
740struct zfcp_port *
741zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, wwn_t wwpn)
742{
743 struct zfcp_port *port;
744 int found = 0;
745
746 list_for_each_entry(port, &adapter->port_list_head, list) {
747 if ((port->wwpn == wwpn) &&
748 !(atomic_read(&port->status) &
749 (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE))) {
750 found = 1;
751 break;
752 }
753 }
754 return found ? port : NULL;
755}
756
757/**
758 * zfcp_get_port_by_did - find port in port list of adapter by d_id
759 * @adapter: pointer to adapter to search for port
760 * @d_id: d_id to search for
761 * Traverse list of all ports of an adapter and return pointer to a port
762 * with the given d_id.
763 252	 */
764	struct zfcp_port *
765	zfcp_get_port_by_did(struct zfcp_adapter *adapter, u32 d_id)
253	struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
254						wwn_t wwpn)
766 255	{
767 256		struct zfcp_port *port;
768		int found = 0;
769 257
770	list_for_each_entry(port, &adapter->port_list_head, list) {
771		if ((port->d_id == d_id) &&
772		    !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status))
773		{
774			found = 1;
258	list_for_each_entry(port, &adapter->port_list_head, list)
259		if ((port->wwpn == wwpn) && !(atomic_read(&port->status) &
260		    (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE)))
261			return port;
262	return NULL;
775			break;
776		}
777	}
778	return found ? port : NULL;
779 263	}
780 264
781	/**
265	static void zfcp_sysfs_unit_release(struct device *dev)
782	 * zfcp_get_adapter_by_busid - find adapter in adapter list by bus_id
783	 * @bus_id: bus_id to search for
784	 * Traverse list of all adapters and return pointer to an adapter
785	 * with the given bus_id.
786	 */
787	struct zfcp_adapter *
788	zfcp_get_adapter_by_busid(char *bus_id)
789 266	{
790		struct zfcp_adapter *adapter;
267		kfree(container_of(dev, struct zfcp_unit, sysfs_device));
791		int found = 0;
792
793	list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list) {
794		if ((strncmp(bus_id, zfcp_get_busid_by_adapter(adapter),
795			     BUS_ID_SIZE) == 0) &&
796		    !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE,
797				      &adapter->status)) {
798			found = 1;
799			break;
800		}
801	}
802	return found ? adapter : NULL;
803 268	}
804 269
805 270	/**
806 271	 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
807 272	 * @port: pointer to port where unit is added
808 273	 * @fcp_lun: FCP LUN of unit to be enqueued
809	 * Return: pointer to enqueued unit on success, NULL on error
274	 * Returns: pointer to enqueued unit on success, ERR_PTR on error
810 275	 * Locks: config_sema must be held to serialize changes to the unit list
811 276	 *
812 277	 * Sets up some unit internal structures and creates sysfs entry.
813 278	 */
814	struct zfcp_unit *
815	zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
279	struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
816 280	{
817 281		struct zfcp_unit *unit;
818 282
819	/*
820	 * check that there is no unit with this FCP_LUN already in list
821	 * and enqueue it.
822	 * Note: Unlike for the adapter and the port, this is an error
823	 */
824	read_lock_irq(&zfcp_data.config_lock);
825	unit = zfcp_get_unit_by_lun(port, fcp_lun);
826	read_unlock_irq(&zfcp_data.config_lock);
827	if (unit)
828		return NULL;
829
830	unit = kzalloc(sizeof (struct zfcp_unit), GFP_KERNEL);
283	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
831 284	if (!unit)
832		return NULL;
285		return ERR_PTR(-ENOMEM);
833 286
834	/* initialise reference count stuff */
835 287	atomic_set(&unit->refcount, 0);
836 288	init_waitqueue_head(&unit->remove_wq);
837 289
838 290	unit->port = port;
839 291	unit->fcp_lun = fcp_lun;
840 292
841	/* setup for sysfs registration */
842 293	snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun);
843 294	unit->sysfs_device.parent = &port->sysfs_device;
844 295	unit->sysfs_device.release = zfcp_sysfs_unit_release;
@@ -847,14 +298,28 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
847 298	/* mark unit unusable as long as sysfs registration is not complete */
848 299	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
849 300
850	if (device_register(&unit->sysfs_device)) {
851		kfree(unit);
852		return NULL;
301	spin_lock_init(&unit->latencies.lock);
302	unit->latencies.write.channel.min = 0xFFFFFFFF;
303	unit->latencies.write.fabric.min = 0xFFFFFFFF;
304	unit->latencies.read.channel.min = 0xFFFFFFFF;
305	unit->latencies.read.fabric.min = 0xFFFFFFFF;
306	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
307	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
308
309	read_lock_irq(&zfcp_data.config_lock);
310	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
311		read_unlock_irq(&zfcp_data.config_lock);
312		goto err_out_free;
853 313	}
314	read_unlock_irq(&zfcp_data.config_lock);
854 315
855	if (zfcp_sysfs_unit_create_files(&unit->sysfs_device)) {
316	if (device_register(&unit->sysfs_device))
317		goto err_out_free;
318
319	if (sysfs_create_group(&unit->sysfs_device.kobj,
320			       &zfcp_sysfs_unit_attrs)) {
856 321		device_unregister(&unit->sysfs_device);
857		return NULL;
322		return ERR_PTR(-EIO);
858 323	}
859 324
860 325	zfcp_unit_get(unit);
@@ -864,16 +329,27 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
864 329	list_add_tail(&unit->list, &port->unit_list_head);
865 330	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
866 331	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
332
867 333	write_unlock_irq(&zfcp_data.config_lock);
868 334
869 335	port->units++;
870 336	zfcp_port_get(port);
871 337
872 338	return unit;
339
340	err_out_free:
341	kfree(unit);
342	return ERR_PTR(-EINVAL);
873 343	}
874 344
875	void
876	zfcp_unit_dequeue(struct zfcp_unit *unit)
345	/**
346	 * zfcp_unit_dequeue - dequeue unit
347	 * @unit: pointer to zfcp_unit
348	 *
349	 * Waits until all work is done on the unit, then removes it from the
350	 * unit list of the associated port.
351	 */
352	void zfcp_unit_dequeue(struct zfcp_unit *unit)
877 353	{
878 354		zfcp_unit_wait(unit);
879 355	write_lock_irq(&zfcp_data.config_lock);
@@ -881,68 +357,51 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
881 357	write_unlock_irq(&zfcp_data.config_lock);
882 358	unit->port->units--;
883 359	zfcp_port_put(unit->port);
884	zfcp_sysfs_unit_remove_files(&unit->sysfs_device);
360	sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
885 361	device_unregister(&unit->sysfs_device);
886 362	}
887 363
888	/*
889	 * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
890	 * commands.
891	 * It also generates fcp-nameserver request/response buffers and unsolicited
892	 * status read fsf_req buffers.
893	 *
894	 * locks: must only be called with zfcp_data.config_sema taken
895	 */
896	static int
897	zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
364	static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
898 365	{
366		/* must only be called with zfcp_data.config_sema taken */
899 367	adapter->pool.fsf_req_erp =
900		mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
901					 zfcp_data.fsf_req_qtcb_cache);
368		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
902 369	if (!adapter->pool.fsf_req_erp)
903 370		return -ENOMEM;
904 371
905 372	adapter->pool.fsf_req_scsi =
906		mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
907					 zfcp_data.fsf_req_qtcb_cache);
373		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
908 374	if (!adapter->pool.fsf_req_scsi)
909 375		return -ENOMEM;
910 376
911 377	adapter->pool.fsf_req_abort =
912		mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
913					 zfcp_data.fsf_req_qtcb_cache);
378		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
914 379	if (!adapter->pool.fsf_req_abort)
915 380		return -ENOMEM;
916 381
917 382	adapter->pool.fsf_req_status_read =
918		mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
383		mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
919 384				    sizeof(struct zfcp_fsf_req));
920 385	if (!adapter->pool.fsf_req_status_read)
921 386		return -ENOMEM;
922 387
923 388	adapter->pool.data_status_read =
924		mempool_create_slab_pool(ZFCP_POOL_STATUS_READ_NR,
389		mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
925 390				    zfcp_data.sr_buffer_cache);
926 391	if (!adapter->pool.data_status_read)
927 392		return -ENOMEM;
928 393
929 394	adapter->pool.data_gid_pn =
930		mempool_create_slab_pool(ZFCP_POOL_DATA_GID_PN_NR,
931					 zfcp_data.gid_pn_cache);
395		mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
932 396	if (!adapter->pool.data_gid_pn)
933 397		return -ENOMEM;
934 398
935 399	return 0;
936 400	}
937 401
938	/**
939	 * zfcp_free_low_mem_buffers - free memory pools of an adapter
940	 * @adapter: pointer to zfcp_adapter for which memory pools should be freed
941	 * locking: zfcp_data.config_sema must be held
942	 */
943	static void
944	zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
402	static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
945 403	{
404		/* zfcp_data.config_sema must be held */
946 405	if (adapter->pool.fsf_req_erp)
947 406		mempool_destroy(adapter->pool.fsf_req_erp);
948 407	if (adapter->pool.fsf_req_scsi)
@@ -962,20 +421,61 @@ static void zfcp_dummy_release(struct device *dev)
962 421		return;
963 422	}
964 423
965	/*
424	/**
425	 * zfcp_status_read_refill - refill the long running status_read_requests
426	 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
427	 *
428	 * Returns: 0 on success, 1 otherwise
429	 *
430	 * If 16 or more status_read requests are missing, an adapter reopen
431	 * is triggered.
432	 */
433	int zfcp_status_read_refill(struct zfcp_adapter *adapter)
434	{
435		while (atomic_read(&adapter->stat_miss) > 0)
436			if (zfcp_fsf_status_read(adapter)) {
437				if (atomic_read(&adapter->stat_miss) >= 16) {
438					zfcp_erp_adapter_reopen(adapter, 0, 103, NULL);
439					return 1;
440				}
441				break;
442			} else
443				atomic_dec(&adapter->stat_miss);
444		return 0;
445	}
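The refill is also driven asynchronously through the adapter's stat_work item
(see _zfcp_status_read_scheduler() below); a sketch of how a completion path
might kick it (the exact scheduling site is an assumption here):

	/* after consuming one unsolicited status read buffer */
	atomic_inc(&adapter->stat_miss);
	schedule_work(&adapter->stat_work);	/* ends up in the refill above */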
446
447static void _zfcp_status_read_scheduler(struct work_struct *work)
448{
449 zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
450 stat_work));
451}
452
453static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
454{
455 struct zfcp_port *port;
456
457 port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
458 ZFCP_DID_DIRECTORY_SERVICE);
459 if (IS_ERR(port))
460 return PTR_ERR(port);
461 zfcp_port_put(port);
462
463 return 0;
464}
465
466/**
467 * zfcp_adapter_enqueue - enqueue a new adapter to the list
468 * @ccw_device: pointer to the struct ccw_device
469 *
470 * Returns: 0 if a new adapter was successfully enqueued
471 * -ENOMEM if alloc failed
966 472	 * Enqueues an adapter at the end of the adapter list in the driver data.
967 473	 * All adapter internal structures are set up.
968 474	 * Proc-fs entries are also created.
969	 *
970	 * returns: 0 if a new adapter was successfully enqueued
971	 *          ZFCP_KNOWN if an adapter with this devno was already present
972	 *          -ENOMEM if alloc failed
973 475	 * locks: config_sema must be held to serialise changes to the adapter list
974 476	 */
975	struct zfcp_adapter *
976	zfcp_adapter_enqueue(struct ccw_device *ccw_device)
477	int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
977 478	{
978		int retval = 0;
979 479		struct zfcp_adapter *adapter;
980 480
981 481	/*
@@ -983,85 +483,58 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
983 483	 * are protected by the config_sema, which must be held to get here
984 484	 */
985 485
986	/* try to allocate new adapter data structure (zeroed) */
987	adapter = kzalloc(sizeof (struct zfcp_adapter), GFP_KERNEL);
988	if (!adapter) {
989		ZFCP_LOG_INFO("error: allocation of base adapter "
990			      "structure failed\n");
991		goto out;
992	}
486	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
487	if (!adapter)
488		return -ENOMEM;
993 489
994 490	ccw_device->handler = NULL;
995
996	/* save ccw_device pointer */
997 491	adapter->ccw_device = ccw_device;
492	atomic_set(&adapter->refcount, 0);
998 493
999	retval = zfcp_qdio_allocate_queues(adapter);
1000	if (retval)
1001		goto queues_alloc_failed;
1002
1003	retval = zfcp_qdio_allocate(adapter);
1004	if (retval)
494	if (zfcp_qdio_allocate(adapter))
1005 495		goto qdio_allocate_failed;
1006 496
1007	retval = zfcp_allocate_low_mem_buffers(adapter);
1008	if (retval) {
1009		ZFCP_LOG_INFO("error: pool allocation failed\n");
497	if (zfcp_allocate_low_mem_buffers(adapter))
1010 498		goto failed_low_mem_buffers;
1011	}
1012 499
1013	/* initialise reference count stuff */
1014	atomic_set(&adapter->refcount, 0);
500	if (zfcp_reqlist_alloc(adapter))
501		goto failed_low_mem_buffers;
502
503	if (zfcp_adapter_debug_register(adapter))
504		goto debug_register_failed;
505
1015 506	init_waitqueue_head(&adapter->remove_wq);
507	init_waitqueue_head(&adapter->erp_thread_wqh);
508	init_waitqueue_head(&adapter->erp_done_wqh);
1016 509
1017	/* initialise list of ports */
1018 510	INIT_LIST_HEAD(&adapter->port_list_head);
1019
1020	/* initialise list of ports to be removed */
1021 511	INIT_LIST_HEAD(&adapter->port_remove_lh);
512	INIT_LIST_HEAD(&adapter->erp_ready_head);
513	INIT_LIST_HEAD(&adapter->erp_running_head);
1022 514
1023	/* initialize list of fsf requests */
1024 515	spin_lock_init(&adapter->req_list_lock);
1025	retval = zfcp_reqlist_alloc(adapter);
1026	if (retval) {
1027		ZFCP_LOG_INFO("request list initialization failed\n");
1028		goto failed_low_mem_buffers;
1029	}
1030
1031	/* initialize debug locks */
1032 516
1033 517	spin_lock_init(&adapter->hba_dbf_lock);
1034 518	spin_lock_init(&adapter->san_dbf_lock);
1035 519	spin_lock_init(&adapter->scsi_dbf_lock);
1036 520	spin_lock_init(&adapter->rec_dbf_lock);
1037
521	spin_lock_init(&adapter->req_q.lock);
1038	retval = zfcp_adapter_debug_register(adapter);
1039	if (retval)
1040		goto debug_register_failed;
1041
1042	/* initialize error recovery stuff */
1043 522
1044 523	rwlock_init(&adapter->erp_lock);
1045	sema_init(&adapter->erp_ready_sem, 0);
1046	INIT_LIST_HEAD(&adapter->erp_ready_head);
1047	INIT_LIST_HEAD(&adapter->erp_running_head);
1048
1049	/* initialize abort lock */
1050 524	rwlock_init(&adapter->abort_lock);
1051 525
1052	/* initialise some erp stuff */
526	sema_init(&adapter->erp_ready_sem, 0);
1053	init_waitqueue_head(&adapter->erp_thread_wqh);
1054	init_waitqueue_head(&adapter->erp_done_wqh);
1055 527
1056	/* initialize lock of associated request queue */
1057	rwlock_init(&adapter->request_queue.queue_lock);
528	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
529	INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
1058 530
1059 531	/* mark adapter unusable as long as sysfs registration is not complete */
1060 532	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1061 533
1062 534	dev_set_drvdata(&ccw_device->dev, adapter);
1063 535
1064	if (zfcp_sysfs_adapter_create_files(&ccw_device->dev))
536	if (sysfs_create_group(&ccw_device->dev.kobj,
537			       &zfcp_sysfs_adapter_attrs))
1065 538		goto sysfs_failed;
1066 539
1067 540	adapter->generic_services.parent = &adapter->ccw_device->dev;
@@ -1072,7 +545,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1072 545	if (device_register(&adapter->generic_services))
1073 546		goto generic_services_failed;
1074 547
1075	/* put allocated adapter at list tail */
1076 548	write_lock_irq(&zfcp_data.config_lock);
1077 549	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1078 550	list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
@@ -1080,57 +552,49 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1080 552
1081 553	zfcp_data.adapters++;
1082 554
1083	goto out;
555	zfcp_nameserver_enqueue(adapter);
556
557	return 0;
1084 558
1085	 generic_services_failed:
1086	zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
1087	 sysfs_failed:
559	generic_services_failed:
560	sysfs_remove_group(&ccw_device->dev.kobj,
561			   &zfcp_sysfs_adapter_attrs);
562	sysfs_failed:
1088 563	zfcp_adapter_debug_unregister(adapter);
1089	 debug_register_failed:
564	debug_register_failed:
1090 565	dev_set_drvdata(&ccw_device->dev, NULL);
1091	zfcp_reqlist_free(adapter);
566	kfree(adapter->req_list);
1092	 failed_low_mem_buffers:
567	failed_low_mem_buffers:
1093 568	zfcp_free_low_mem_buffers(adapter);
1094	if (qdio_free(ccw_device) != 0)
1095		ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
1096				zfcp_get_busid_by_adapter(adapter));
1097	 qdio_allocate_failed:
1098	zfcp_qdio_free_queues(adapter);
1099	 queues_alloc_failed:
569	qdio_allocate_failed:
570	zfcp_qdio_free(adapter);
1100 571	kfree(adapter);
1101	adapter = NULL;
572	return -ENOMEM;
1102	 out:
1103	return adapter;
1104 573	}
1105 574
1106	/*
1107	 * returns: 0 - struct zfcp_adapter data structure successfully removed
1108	 *          !0 - struct zfcp_adapter data structure could not be removed
1109	 *               (e.g. still used)
575	/**
576	 * zfcp_adapter_dequeue - remove the adapter from the resource list
577	 * @adapter: pointer to struct zfcp_adapter which should be removed
1110 578	 * locks: adapter list write lock is assumed to be held by caller
1111 579	 */
1112	void
1113	zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
580	void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1114 581	{
1115 582		int retval = 0;
1116 583	unsigned long flags;
1117 584
585	cancel_work_sync(&adapter->scan_work);
586	cancel_work_sync(&adapter->stat_work);
1118 587	zfcp_adapter_scsi_unregister(adapter);
1119 588	device_unregister(&adapter->generic_services);
1120	zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
589	sysfs_remove_group(&adapter->ccw_device->dev.kobj,
590			   &zfcp_sysfs_adapter_attrs);
1121 591	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
1122 592	/* sanity check: no pending FSF requests */
1123 593	spin_lock_irqsave(&adapter->req_list_lock, flags);
1124 594	retval = zfcp_reqlist_isempty(adapter);
1125 595	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
1126	if (!retval) {
1127		ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
1128				"%i requests outstanding\n",
1129				zfcp_get_busid_by_adapter(adapter), adapter,
1130				atomic_read(&adapter->reqs_active));
1131		retval = -EBUSY;
1132		goto out;
1133	}
596	if (!retval)
597		return;
1134 598
1135 599	zfcp_adapter_debug_unregister(adapter);
1136 600
@@ -1142,26 +606,18 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1142 606	/* decrease number of adapters in list */
1143 607	zfcp_data.adapters--;
1144 608
1145	ZFCP_LOG_TRACE("adapter %s (%p) removed from list, "
1146		       "%i adapters still in list\n",
1147		       zfcp_get_busid_by_adapter(adapter),
1148		       adapter, zfcp_data.adapters);
1149
1150	retval = qdio_free(adapter->ccw_device);
1151	if (retval)
1152		ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
1153				zfcp_get_busid_by_adapter(adapter));
609	zfcp_qdio_free(adapter);
1154 610
1155 611	zfcp_free_low_mem_buffers(adapter);
1156	/* free memory of adapter data structure and queues */
1157	zfcp_qdio_free_queues(adapter);
1158	zfcp_reqlist_free(adapter);
612	kfree(adapter->req_list);
1159 613	kfree(adapter->fc_stats);
1160 614	kfree(adapter->stats_reset_data);
1161	ZFCP_LOG_TRACE("freeing adapter structure\n");
1162 615	kfree(adapter);
1163	 out:
1164	return;
616	}
617
618	static void zfcp_sysfs_port_release(struct device *dev)
619	{
620		kfree(container_of(dev, struct zfcp_port, sysfs_device));
1165 621	}
1166 622
1167 623	/**
@@ -1170,98 +626,90 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1170 626	 * @wwpn: WWPN of the remote port to be enqueued
1171 627	 * @status: initial status for the port
1172 628	 * @d_id: destination id of the remote port to be enqueued
1173	 * Return: pointer to enqueued port on success, NULL on error
629	 * Returns: pointer to enqueued port on success, ERR_PTR on error
1174 630	 * Locks: config_sema must be held to serialize changes to the port list
1175 631	 *
1176 632	 * All port internal structures are set up and the sysfs entry is generated.
1177 633	 * d_id is used to enqueue ports with a well known address like the Directory
1178 634	 * Service for nameserver lookup.
1179 635	 */
1180	struct zfcp_port *
1181	zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
1182			  u32 d_id)
636	struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
637				            u32 status, u32 d_id)
1183 638	{
1184 639		struct zfcp_port *port;
1185	int check_wwpn;
640	int retval;
1186
641	char *bus_id;
1187	check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN);
1188	/*
1189	 * check that there is no port with this WWPN already in list
1190	 */
1191	if (check_wwpn) {
1192		read_lock_irq(&zfcp_data.config_lock);
1193		port = zfcp_get_port_by_wwpn(adapter, wwpn);
1194		read_unlock_irq(&zfcp_data.config_lock);
1195		if (port)
1196			return NULL;
1197	}
1198 642
1199	port = kzalloc(sizeof (struct zfcp_port), GFP_KERNEL);
643	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
1200 644	if (!port)
1201		return NULL;
645		return ERR_PTR(-ENOMEM);
1202 646
1203	/* initialise reference count stuff */
1204	atomic_set(&port->refcount, 0);
1205 647	init_waitqueue_head(&port->remove_wq);
1206 648
1207 649	INIT_LIST_HEAD(&port->unit_list_head);
1208 650	INIT_LIST_HEAD(&port->unit_remove_lh);
1209 651
1210 652	port->adapter = adapter;
653	port->d_id = d_id;
654	port->wwpn = wwpn;
1211 655
1212	if (check_wwpn)
1213		port->wwpn = wwpn;
1214
1215	atomic_set_mask(status, &port->status);
656	/* mark port unusable as long as sysfs registration is not complete */
657	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
658	atomic_set(&port->refcount, 0);
1216 659
1217	/* setup for sysfs registration */
1218 660	if (status & ZFCP_STATUS_PORT_WKA) {
1219 661		switch (d_id) {
1220 662		case ZFCP_DID_DIRECTORY_SERVICE:
1221			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1222				 "directory");
663			bus_id = "directory";
1223 664			break;
1224 665		case ZFCP_DID_MANAGEMENT_SERVICE:
1225			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1226				 "management");
666			bus_id = "management";
1227 667			break;
1228 668		case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
1229			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1230				 "key_distribution");
669			bus_id = "key_distribution";
1231 670			break;
1232 671		case ZFCP_DID_ALIAS_SERVICE:
1233			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1234				 "alias");
672			bus_id = "alias";
1235 673			break;
1236 674		case ZFCP_DID_TIME_SERVICE:
1237			snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE,
1238				 "time");
675			bus_id = "time";
1239 676			break;
1240 677		default:
1241 678			kfree(port);
1242			return NULL;
679			return ERR_PTR(-EINVAL);
1243 680		}
1244	port->d_id = d_id;
681		snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
1245 682		port->sysfs_device.parent = &adapter->generic_services;
1246 683	} else {
1247 684		snprintf(port->sysfs_device.bus_id,
1248 685			 BUS_ID_SIZE, "0x%016llx", wwpn);
1249 686		port->sysfs_device.parent = &adapter->ccw_device->dev;
1250 687	}
688
1251 689	port->sysfs_device.release = zfcp_sysfs_port_release;
1252 690	dev_set_drvdata(&port->sysfs_device, port);
1253 691
1254	/* mark port unusable as long as sysfs registration is not complete */
1255	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
692	read_lock_irq(&zfcp_data.config_lock);
693	if (!(status & ZFCP_STATUS_PORT_NO_WWPN))
694		if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
695			read_unlock_irq(&zfcp_data.config_lock);
696			goto err_out_free;
697		}
698	read_unlock_irq(&zfcp_data.config_lock);
1256 699
1257	if (device_register(&port->sysfs_device)) {
1258		kfree(port);
1259		return NULL;
1260	}
700	if (device_register(&port->sysfs_device))
701		goto err_out_free;
702
703	if (status & ZFCP_STATUS_PORT_WKA)
704		retval = sysfs_create_group(&port->sysfs_device.kobj,
705					    &zfcp_sysfs_ns_port_attrs);
706	else
707		retval = sysfs_create_group(&port->sysfs_device.kobj,
708					    &zfcp_sysfs_port_attrs);
1261 709
1262	if (zfcp_sysfs_port_create_files(&port->sysfs_device, status)) {
710	if (retval) {
1263 711		device_unregister(&port->sysfs_device);
1264		return NULL;
712		goto err_out;
1265 713	}
1266 714
1267 715	zfcp_port_get(port);
@@ -1274,15 +722,23 @@ zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
1274 722	if (!adapter->nameserver_port)
1275 723		adapter->nameserver_port = port;
1276 724	adapter->ports++;
725
1277 726	write_unlock_irq(&zfcp_data.config_lock);
1278 727
1279 728	zfcp_adapter_get(adapter);
1280
1281 729	return port;
730
731	err_out_free:
732	kfree(port);
733	err_out:
734	return ERR_PTR(-EINVAL);
1282 735	}
1283 736
1284	void
1285	zfcp_port_dequeue(struct zfcp_port *port)
737	/**
738	 * zfcp_port_dequeue - dequeues a port from the port list of the adapter
739	 * @port: pointer to struct zfcp_port which should be removed
740	 */
741	void zfcp_port_dequeue(struct zfcp_port *port)
1286 742	{
1287 743		zfcp_port_wait(port);
1288 744	write_lock_irq(&zfcp_data.config_lock);
@@ -1293,546 +749,53 @@ zfcp_port_dequeue(struct zfcp_port *port)
1293 749	fc_remote_port_delete(port->rport);
1294 750	port->rport = NULL;
1295 751	zfcp_adapter_put(port->adapter);
1296	zfcp_sysfs_port_remove_files(&port->sysfs_device,
1297				     atomic_read(&port->status));
1298	device_unregister(&port->sysfs_device);
752	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
753		sysfs_remove_group(&port->sysfs_device.kobj,
754				   &zfcp_sysfs_ns_port_attrs);
755	else
756		sysfs_remove_group(&port->sysfs_device.kobj,
757				   &zfcp_sysfs_port_attrs);
758	device_unregister(&port->sysfs_device);
1299}
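With the enqueue functions now returning ERR_PTR() instead of NULL, callers
switch to the IS_ERR()/PTR_ERR() idiom, as the new zfcp_nameserver_enqueue()
above already does:

	port = zfcp_port_enqueue(adapter, wwpn, 0, d_id);
	if (IS_ERR(port))
		return PTR_ERR(port);	/* -ENOMEM or -EINVAL */
	zfcp_port_put(port);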
1300
1301/* Enqueues a nameserver port */
1302int
1303zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
1304{
1305 struct zfcp_port *port;
1306
1307 port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
1308 ZFCP_DID_DIRECTORY_SERVICE);
1309 if (!port) {
1310 ZFCP_LOG_INFO("error: enqueue of nameserver port for "
1311 "adapter %s failed\n",
1312 zfcp_get_busid_by_adapter(adapter));
1313 return -ENXIO;
1314 }
1315 zfcp_port_put(port);
1316
1317 return 0;
1318}
1319
1320#undef ZFCP_LOG_AREA
1321
1322/****************************************************************/
1323/******* Fibre Channel Standard related Functions **************/
1324/****************************************************************/
1325
1326#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC
1327
1328static void zfcp_fsf_incoming_els_rscn(struct zfcp_fsf_req *fsf_req)
1329{
1330 struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
1331 struct zfcp_adapter *adapter = fsf_req->adapter;
1332 struct fcp_rscn_head *fcp_rscn_head;
1333 struct fcp_rscn_element *fcp_rscn_element;
1334 struct zfcp_port *port;
1335 u16 i;
1336 u16 no_entries;
1337 u32 range_mask;
1338 unsigned long flags;
1339
1340 fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload;
1341 fcp_rscn_element = (struct fcp_rscn_element *) status_buffer->payload;
1342
1343 /* see FC-FS */
1344 no_entries = (fcp_rscn_head->payload_len / 4);
1345
1346 for (i = 1; i < no_entries; i++) {
1347 /* skip head and start with 1st element */
1348 fcp_rscn_element++;
1349 switch (fcp_rscn_element->addr_format) {
1350 case ZFCP_PORT_ADDRESS:
1351 range_mask = ZFCP_PORTS_RANGE_PORT;
1352 break;
1353 case ZFCP_AREA_ADDRESS:
1354 range_mask = ZFCP_PORTS_RANGE_AREA;
1355 break;
1356 case ZFCP_DOMAIN_ADDRESS:
1357 range_mask = ZFCP_PORTS_RANGE_DOMAIN;
1358 break;
1359 case ZFCP_FABRIC_ADDRESS:
1360 range_mask = ZFCP_PORTS_RANGE_FABRIC;
1361 break;
1362 default:
1363 ZFCP_LOG_INFO("incoming RSCN with unknown "
1364 "address format\n");
1365 continue;
1366 }
1367 read_lock_irqsave(&zfcp_data.config_lock, flags);
1368 list_for_each_entry(port, &adapter->port_list_head, list) {
1369 if (atomic_test_mask
1370 (ZFCP_STATUS_PORT_WKA, &port->status))
1371 continue;
1372 /* Do we know this port? If not skip it. */
1373 if (!atomic_test_mask
1374 (ZFCP_STATUS_PORT_DID_DID, &port->status)) {
1375 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1376 "port 0x%016Lx\n", port->wwpn);
1377 zfcp_erp_port_reopen(port,
1378 ZFCP_STATUS_COMMON_ERP_FAILED,
1379 82, fsf_req);
1380 continue;
1381 }
1382
1383 /*
1384 * FIXME: race: d_id might being invalidated
1385 * (...DID_DID reset)
1386 */
1387 if ((port->d_id & range_mask)
1388 == (fcp_rscn_element->nport_did & range_mask)) {
1389 ZFCP_LOG_TRACE("reopen did 0x%08x\n",
1390 fcp_rscn_element->nport_did);
1391 /*
1392 * Unfortunately, an RSCN does not specify the
1393 * type of change a target underwent. We assume
1394 * that it makes sense to reopen the link.
1395 * FIXME: Shall we try to find out more about
1396 * the target and link state before closing it?
1397 * How to accomplish this? (nameserver?)
1398 * Where would such code be put in?
1399 * (inside or outside erp)
1400 */
1401 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1402 "port 0x%016Lx\n", port->wwpn);
1403 zfcp_test_link(port);
1404 }
1405 }
1406 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1407 }
1408}
1409
1410static void zfcp_fsf_incoming_els_plogi(struct zfcp_fsf_req *fsf_req)
1411{
1412 struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
1413 struct zfcp_adapter *adapter = fsf_req->adapter;
1414 struct fsf_plogi *els_plogi;
1415 struct zfcp_port *port;
1416 unsigned long flags;
1417
1418 els_plogi = (struct fsf_plogi *) status_buffer->payload;
1419 read_lock_irqsave(&zfcp_data.config_lock, flags);
1420 list_for_each_entry(port, &adapter->port_list_head, list) {
1421 if (port->wwpn == (*(wwn_t *) &els_plogi->serv_param.wwpn))
1422 break;
1423 }
1424 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1425
1426 if (!port || (port->wwpn != (*(wwn_t *) &els_plogi->serv_param.wwpn))) {
1427 ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexisting port "
1428 "with d_id 0x%06x on adapter %s\n",
1429 status_buffer->d_id,
1430 zfcp_get_busid_by_adapter(adapter));
1431 } else {
1432 zfcp_erp_port_forced_reopen(port, 0, 83, fsf_req);
1433 }
1434}
1435
1436static void zfcp_fsf_incoming_els_logo(struct zfcp_fsf_req *fsf_req)
1437{
1438 struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
1439 struct zfcp_adapter *adapter = fsf_req->adapter;
1440 struct fcp_logo *els_logo = (struct fcp_logo *) status_buffer->payload;
1441 struct zfcp_port *port;
1442 unsigned long flags;
1443
1444 read_lock_irqsave(&zfcp_data.config_lock, flags);
1445 list_for_each_entry(port, &adapter->port_list_head, list) {
1446 if (port->wwpn == els_logo->nport_wwpn)
1447 break;
1448 }
1449 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1450
1451 if (!port || (port->wwpn != els_logo->nport_wwpn)) {
1452 ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexisting port "
1453 "with d_id 0x%06x on adapter %s\n",
1454 status_buffer->d_id,
1455 zfcp_get_busid_by_adapter(adapter));
1456 } else {
1457 zfcp_erp_port_forced_reopen(port, 0, 84, fsf_req);
1458 }
1459}
1460
1461static void
1462zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
1463 struct fsf_status_read_buffer *status_buffer)
1464{
1465 ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
1466 "for adapter %s\n", *(u32 *) (status_buffer->payload),
1467 zfcp_get_busid_by_adapter(adapter));
1468
1469}
1470
1471void
1472zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
1473{
1474 struct fsf_status_read_buffer *status_buffer;
1475 u32 els_type;
1476 struct zfcp_adapter *adapter;
1477
1478 status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
1479 els_type = *(u32 *) (status_buffer->payload);
1480 adapter = fsf_req->adapter;
1481
1482 zfcp_san_dbf_event_incoming_els(fsf_req);
1483 if (els_type == LS_PLOGI)
1484 zfcp_fsf_incoming_els_plogi(fsf_req);
1485 else if (els_type == LS_LOGO)
1486 zfcp_fsf_incoming_els_logo(fsf_req);
1487 else if ((els_type & 0xffff0000) == LS_RSCN)
1488 /* we are only concerned with the command, not the length */
1489 zfcp_fsf_incoming_els_rscn(fsf_req);
1490 else
1491 zfcp_fsf_incoming_els_unknown(adapter, status_buffer);
1492}
1493
1494
1495/**
1496 * zfcp_gid_pn_buffers_alloc - allocate buffers for GID_PN nameserver request
1497 * @gid_pn: pointer to return pointer to struct zfcp_gid_pn_data
1498 * @pool: pointer to mempool_t; if non-NULL, the memory pool is used for allocation
1499 */
1500static int
1501zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
1502{
1503 struct zfcp_gid_pn_data *data;
1504
1505 if (pool != NULL) {
1506 data = mempool_alloc(pool, GFP_ATOMIC);
1507 if (likely(data != NULL)) {
1508 data->ct.pool = pool;
1509 }
1510 } else {
1511 data = kmem_cache_alloc(zfcp_data.gid_pn_cache, GFP_ATOMIC);
1512 }
1513
1514 if (NULL == data)
1515 return -ENOMEM;
1516
1517 memset(data, 0, sizeof(*data));
1518 sg_init_table(&data->req , 1);
1519 sg_init_table(&data->resp , 1);
1520 data->ct.req = &data->req;
1521 data->ct.resp = &data->resp;
1522 data->ct.req_count = data->ct.resp_count = 1;
1523 zfcp_address_to_sg(&data->ct_iu_req, &data->req, sizeof(struct ct_iu_gid_pn_req));
1524 zfcp_address_to_sg(&data->ct_iu_resp, &data->resp, sizeof(struct ct_iu_gid_pn_resp));
1525
1526 *gid_pn = data;
1527 return 0;
1528}
1529
1530/**
1531 * zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
1532 * @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
1533 */
1534static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
1535{
1536 if (gid_pn->ct.pool)
1537 mempool_free(gid_pn, gid_pn->ct.pool);
1538	else
1539		kmem_cache_free(zfcp_data.gid_pn_cache, gid_pn);
1540	}
1541
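The alloc/free pair above keys off ct.pool to pick the matching release path;
condensed, the fallback pattern is (a sketch, not a verbatim excerpt):

	/* pool-first allocation with slab-cache fallback */
	data = pool ? mempool_alloc(pool, GFP_ATOMIC)
		    : kmem_cache_alloc(zfcp_data.gid_pn_cache, GFP_ATOMIC);
	if (data && pool)
		data->ct.pool = pool;	/* remembered so free uses mempool_free */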
1542/**
1543 * zfcp_ns_gid_pn_request - initiate GID_PN nameserver request
1544 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
1545 */
1546int
1547zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
1548{
1549 int ret;
1550 struct ct_iu_gid_pn_req *ct_iu_req;
1551 struct zfcp_gid_pn_data *gid_pn;
1552 struct zfcp_adapter *adapter = erp_action->adapter;
1553
1554 ret = zfcp_gid_pn_buffers_alloc(&gid_pn, adapter->pool.data_gid_pn);
1555 if (ret < 0) {
1556 ZFCP_LOG_INFO("error: buffer allocation for gid_pn nameserver "
1557 "request failed for adapter %s\n",
1558 zfcp_get_busid_by_adapter(adapter));
1559 goto out;
1560 }
1561
1562 /* setup nameserver request */
1563 ct_iu_req = zfcp_sg_to_address(gid_pn->ct.req);
1564 ct_iu_req->header.revision = ZFCP_CT_REVISION;
1565 ct_iu_req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
1566 ct_iu_req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
1567 ct_iu_req->header.options = ZFCP_CT_SYNCHRONOUS;
1568 ct_iu_req->header.cmd_rsp_code = ZFCP_CT_GID_PN;
1569 ct_iu_req->header.max_res_size = ZFCP_CT_MAX_SIZE;
1570 ct_iu_req->wwpn = erp_action->port->wwpn;
1571
1572 /* setup parameters for send generic command */
1573 gid_pn->ct.port = adapter->nameserver_port;
1574 gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
1575 gid_pn->ct.handler_data = (unsigned long) gid_pn;
1576 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
1577 gid_pn->port = erp_action->port;
1578
1579 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
1580 erp_action);
1581 if (ret) {
1582 ZFCP_LOG_INFO("error: initiation of gid_pn nameserver request "
1583 "failed for adapter %s\n",
1584 zfcp_get_busid_by_adapter(adapter));
1585
1586 zfcp_gid_pn_buffers_free(gid_pn);
1587 }
1588
1589 out:
1590 return ret;
1591}
1592
1593/**
1594 * zfcp_ns_gid_pn_handler - handler for GID_PN nameserver request
1595 * @data: unsigned long, contains pointer to struct zfcp_gid_pn_data
1596 */
1597static void zfcp_ns_gid_pn_handler(unsigned long data)
1598{
1599 struct zfcp_port *port;
1600 struct zfcp_send_ct *ct;
1601 struct ct_iu_gid_pn_req *ct_iu_req;
1602 struct ct_iu_gid_pn_resp *ct_iu_resp;
1603 struct zfcp_gid_pn_data *gid_pn;
1604
1605
1606 gid_pn = (struct zfcp_gid_pn_data *) data;
1607 port = gid_pn->port;
1608 ct = &gid_pn->ct;
1609 ct_iu_req = zfcp_sg_to_address(ct->req);
1610 ct_iu_resp = zfcp_sg_to_address(ct->resp);
1611
1612 if (ct->status != 0)
1613 goto failed;
1614
1615 if (zfcp_check_ct_response(&ct_iu_resp->header)) {
1616 /* FIXME: do we need some specific erp entry points */
1617 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
1618 goto failed;
1619 }
1620 /* paranoia */
1621 if (ct_iu_req->wwpn != port->wwpn) {
1622 ZFCP_LOG_NORMAL("bug: wwpn 0x%016Lx returned by nameserver "
1623 "lookup does not match expected wwpn 0x%016Lx "
1624 "for adapter %s\n", ct_iu_req->wwpn, port->wwpn,
1625 zfcp_get_busid_by_port(port));
1626 goto mismatch;
1627 }
1628
1629 /* looks like a valid d_id */
1630 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
1631 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
1632 ZFCP_LOG_DEBUG("adapter %s: wwpn=0x%016Lx ---> d_id=0x%06x\n",
1633 zfcp_get_busid_by_port(port), port->wwpn, port->d_id);
1634 goto out;
1635
1636 mismatch:
1637 ZFCP_LOG_DEBUG("CT IUs do not match:\n");
1638 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_req,
1639 sizeof(struct ct_iu_gid_pn_req));
1640 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_resp,
1641 sizeof(struct ct_iu_gid_pn_resp));
1642
1643 failed:
1644 ZFCP_LOG_NORMAL("warning: failed gid_pn nameserver request for wwpn "
1645 "0x%016Lx for adapter %s\n",
1646 port->wwpn, zfcp_get_busid_by_port(port));
1647 out:
1648 zfcp_gid_pn_buffers_free(gid_pn);
1649 return;
1650} 759}
1651 760
1652/* reject CT_IU reason codes acc. to FC-GS-4 */
1653static const struct zfcp_rc_entry zfcp_ct_rc[] = {
1654 {0x01, "invalid command code"},
1655 {0x02, "invalid version level"},
1656 {0x03, "logical error"},
1657 {0x04, "invalid CT_IU size"},
1658 {0x05, "logical busy"},
1659 {0x07, "protocol error"},
1660 {0x09, "unable to perform command request"},
1661 {0x0b, "command not supported"},
1662 {0x0d, "server not available"},
1663 {0x0e, "session could not be established"},
1664 {0xff, "vendor specific error"},
1665 {0, NULL},
1666};
1667
1668/* LS_RJT reason codes acc. to FC-FS */
1669static const struct zfcp_rc_entry zfcp_ls_rjt_rc[] = {
1670 {0x01, "invalid LS_Command code"},
1671 {0x03, "logical error"},
1672 {0x05, "logical busy"},
1673 {0x07, "protocol error"},
1674 {0x09, "unable to perform command request"},
1675 {0x0b, "command not supported"},
1676 {0x0e, "command already in progress"},
1677 {0xff, "vendor specific error"},
1678 {0, NULL},
1679};
1680
1681/* reject reason codes according to FC-PH/FC-FS */
1682static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
1683 {0x01, "invalid D_ID"},
1684 {0x02, "invalid S_ID"},
1685 {0x03, "Nx_Port not available, temporary"},
1686	{0x04, "Nx_Port not available, permanent"},
1687 {0x05, "class not supported"},
1688 {0x06, "delimiter usage error"},
1689 {0x07, "TYPE not supported"},
1690 {0x08, "invalid Link_Control"},
1691 {0x09, "invalid R_CTL field"},
1692 {0x0a, "invalid F_CTL field"},
1693 {0x0b, "invalid OX_ID"},
1694 {0x0c, "invalid RX_ID"},
1695 {0x0d, "invalid SEQ_ID"},
1696 {0x0e, "invalid DF_CTL"},
1697 {0x0f, "invalid SEQ_CNT"},
1698 {0x10, "invalid parameter field"},
1699 {0x11, "exchange error"},
1700 {0x12, "protocol error"},
1701 {0x13, "incorrect length"},
1702 {0x14, "unsupported ACK"},
1703 {0x15, "class of service not supported by entity at FFFFFE"},
1704 {0x16, "login required"},
1705 {0x17, "excessive sequences attempted"},
1706 {0x18, "unable to establish exchange"},
1707 {0x1a, "fabric path not available"},
1708 {0x1b, "invalid VC_ID (class 4)"},
1709 {0x1c, "invalid CS_CTL field"},
1710 {0x1d, "insufficient resources for VC (class 4)"},
1711 {0x1f, "invalid class of service"},
1712 {0x20, "preemption request rejected"},
1713 {0x21, "preemption not enabled"},
1714 {0x22, "multicast error"},
1715 {0x23, "multicast error terminate"},
1716 {0x24, "process login required"},
1717 {0xff, "vendor specific reject"},
1718 {0, NULL},
1719};
1720
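These tables are consumed through zfcp_rc_description(); e.g. turning an
LS_RJT reason code into log text:

	/* sketch: translate a reject reason code for a log message */
	const char *why = zfcp_rc_description(rjt_par->reason_code,
					      zfcp_ls_rjt_rc);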
1721 761	/**
1722	 * zfcp_rc_description - return description for given reason code
1723	 * @code: reason code
1724	 * @rc_table: table of reason codes and descriptions
762	 * zfcp_sg_free_table - free memory used by scatterlists
763	 * @sg: pointer to scatterlist
764	 * @count: number of scatterlists which are to be freed
765	 * the scatterlists are expected to reference pages always
1725 766	 */
1726	static const char *
1727	zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
767	void zfcp_sg_free_table(struct scatterlist *sg, int count)
1728 768	{
1729	const char *descr = "unknown reason code";
769		int i;
1730 770
1731	do {
1732		if (code == rc_table->code) {
1733			descr = rc_table->description;
771	for (i = 0; i < count; i++, sg++)
772		if (sg)
773			free_page((unsigned long) sg_virt(sg));
774		else
1734 775			break;
1735		}
1736		rc_table++;
1737	} while (rc_table->code && rc_table->description);
1738
1739	return descr;
1740 776	}
1741 777
1742 778	/**
1743	 * zfcp_check_ct_response - evaluate reason code for CT_IU
1744	 * @rjt: response payload to a CT_IU request
1745	 * Return: 0 for accept CT_IU, 1 for reject CT_IU or invalid response code
779	 * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
780	 * @sg: pointer to struct scatterlist
781	 * @count: number of scatterlists which should be assigned with buffers
782	 * of size page
783	 *
784	 * Returns: 0 on success, -ENOMEM otherwise
1746 785	 */
1747	int
1748	zfcp_check_ct_response(struct ct_hdr *rjt)
786	int zfcp_sg_setup_table(struct scatterlist *sg, int count)
1749 787	{
1750	if (rjt->cmd_rsp_code == ZFCP_CT_ACCEPT)
1751		return 0;
788		void *addr;
789	int i;
1752 790
1753	if (rjt->cmd_rsp_code != ZFCP_CT_REJECT) {
1754		ZFCP_LOG_NORMAL("error: invalid Generic Service command/"
1755				"response code (0x%04hx)\n",
1756				rjt->cmd_rsp_code);
1757		return 1;
791	sg_init_table(sg, count);
792	for (i = 0; i < count; i++, sg++) {
793		addr = (void *) get_zeroed_page(GFP_KERNEL);
794		if (!addr) {
795			zfcp_sg_free_table(sg, i);
796			return -ENOMEM;
797		}
798		sg_set_buf(sg, addr, PAGE_SIZE);
1758 799	}
1759
800	return 0;
1760 ZFCP_LOG_INFO("Generic Service command rejected\n");
1761 ZFCP_LOG_INFO("%s (0x%02x, 0x%02x, 0x%02x)\n",
1762 zfcp_rc_description(rjt->reason_code, zfcp_ct_rc),
1763 (u32) rjt->reason_code, (u32) rjt->reason_code_expl,
1764 (u32) rjt->vendor_unique);
1765
1766 return 1;
1767}
1768
1769/**
1770 * zfcp_print_els_rjt - print reject parameter and description for ELS reject
1771 * @rjt_par: reject parameter acc. to FC-PH/FC-FS
1772 * @rc_table: table of reason codes and descriptions
1773 */
1774static void
1775zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
1776 const struct zfcp_rc_entry *rc_table)
1777{
1778 ZFCP_LOG_INFO("%s (%02x %02x %02x %02x)\n",
1779 zfcp_rc_description(rjt_par->reason_code, rc_table),
1780 (u32) rjt_par->action, (u32) rjt_par->reason_code,
1781 (u32) rjt_par->reason_expl, (u32) rjt_par->vendor_unique);
1782}
1783
1784/**
1785 * zfcp_fsf_handle_els_rjt - evaluate status qualifier/reason code on ELS reject
1786 * @sq: status qualifier word
1787 * @rjt_par: reject parameter as described in FC-PH and FC-FS
1788 * Return: -EROMTEIO for LS_RJT, -EREMCHG for invalid D_ID, -EIO else
1789 */
1790int
1791zfcp_handle_els_rjt(u32 sq, struct zfcp_ls_rjt_par *rjt_par)
1792{
1793 int ret = -EIO;
1794
1795 if (sq == FSF_IOSTAT_NPORT_RJT) {
1796 ZFCP_LOG_INFO("ELS rejected (P_RJT)\n");
1797 zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
1798 /* invalid d_id */
1799 if (rjt_par->reason_code == 0x01)
1800 ret = -EREMCHG;
1801 } else if (sq == FSF_IOSTAT_FABRIC_RJT) {
1802 ZFCP_LOG_INFO("ELS rejected (F_RJT)\n");
1803 zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
1804 /* invalid d_id */
1805 if (rjt_par->reason_code == 0x01)
1806 ret = -EREMCHG;
1807 } else if (sq == FSF_IOSTAT_LS_RJT) {
1808 ZFCP_LOG_INFO("ELS rejected (LS_RJT)\n");
1809 zfcp_print_els_rjt(rjt_par, zfcp_ls_rjt_rc);
1810 ret = -EREMOTEIO;
1811 } else
1812 ZFCP_LOG_INFO("unexpected SQ: 0x%02x\n", sq);
1813
1814 return ret;
1815}
1816
1817/**
1818 * zfcp_plogi_evaluate - evaluate PLOGI playload and copy important fields
1819 * into zfcp_port structure
1820 * @port: zfcp_port structure
1821 * @plogi: plogi payload
1822 */
1823void
1824zfcp_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
1825{
1826 port->maxframe_size = plogi->serv_param.common_serv_param[7] |
1827 ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
1828 if (plogi->serv_param.class1_serv_param[0] & 0x80)
1829 port->supported_classes |= FC_COS_CLASS1;
1830 if (plogi->serv_param.class2_serv_param[0] & 0x80)
1831 port->supported_classes |= FC_COS_CLASS2;
1832 if (plogi->serv_param.class3_serv_param[0] & 0x80)
1833 port->supported_classes |= FC_COS_CLASS3;
1834 if (plogi->serv_param.class4_serv_param[0] & 0x80)
1835 port->supported_classes |= FC_COS_CLASS4;
1836} 801}
1837
1838#undef ZFCP_LOG_AREA
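
The removed zfcp_plogi_evaluate() packs the receive data field size from bytes 6 and 7 of the PLOGI common service parameters and tests bit 7 of each class-parameter block. A standalone sketch of that decoding, assuming the layout used by the code above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* common service parameters: bytes 6/7 carry the 12-bit receive
	   data field size; 0x08 0x00 encodes 2048 bytes */
	uint8_t csp[16] = { [6] = 0x08, [7] = 0x00 };
	uint8_t class3[4] = { 0x80 };	/* bit 7 set: class valid */

	unsigned maxframe = csp[7] | ((csp[6] & 0x0F) << 8);

	printf("max frame size: %u\n", maxframe);
	if (class3[0] & 0x80)
		printf("class 3 supported\n");
	return 0;
}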
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 66d3b88844b0..391dd29749f8 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -1,64 +1,13 @@
-/*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
- *
- * (C) Copyright IBM Corp. 2002, 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+/*
+ * zfcp device driver
+ *
+ * Registration and callback for the s390 common I/O layer.
+ *
+ * Copyright IBM Corporation 2002, 2008
+ */
 
 #include "zfcp_ext.h"
 
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
26static int zfcp_ccw_probe(struct ccw_device *);
27static void zfcp_ccw_remove(struct ccw_device *);
28static int zfcp_ccw_set_online(struct ccw_device *);
29static int zfcp_ccw_set_offline(struct ccw_device *);
30static int zfcp_ccw_notify(struct ccw_device *, int);
31static void zfcp_ccw_shutdown(struct ccw_device *);
32
33static struct ccw_device_id zfcp_ccw_device_id[] = {
34 {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
35 ZFCP_CONTROL_UNIT_MODEL,
36 ZFCP_DEVICE_TYPE,
37 ZFCP_DEVICE_MODEL)},
38 {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
39 ZFCP_CONTROL_UNIT_MODEL,
40 ZFCP_DEVICE_TYPE,
41 ZFCP_DEVICE_MODEL_PRIV)},
42 {},
43};
44
45static struct ccw_driver zfcp_ccw_driver = {
46 .owner = THIS_MODULE,
47 .name = ZFCP_NAME,
48 .ids = zfcp_ccw_device_id,
49 .probe = zfcp_ccw_probe,
50 .remove = zfcp_ccw_remove,
51 .set_online = zfcp_ccw_set_online,
52 .set_offline = zfcp_ccw_set_offline,
53 .notify = zfcp_ccw_notify,
54 .shutdown = zfcp_ccw_shutdown,
55 .driver = {
56 .groups = zfcp_driver_attr_groups,
57 },
58};
59
60MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
61
 /**
  * zfcp_ccw_probe - probe function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
@@ -69,19 +18,16 @@ MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
  * In addition the nameserver port will be added to the ports of the adapter
  * and its sysfs representation will be created too.
  */
-static int
-zfcp_ccw_probe(struct ccw_device *ccw_device)
+static int zfcp_ccw_probe(struct ccw_device *ccw_device)
 {
-	struct zfcp_adapter *adapter;
 	int retval = 0;
 
 	down(&zfcp_data.config_sema);
-	adapter = zfcp_adapter_enqueue(ccw_device);
-	if (!adapter)
+	if (zfcp_adapter_enqueue(ccw_device)) {
+		dev_err(&ccw_device->dev,
+			"Setup of data structures failed.\n");
 		retval = -EINVAL;
-	else
-		ZFCP_LOG_DEBUG("Probed adapter %s\n",
-			       zfcp_get_busid_by_adapter(adapter));
+	}
 	up(&zfcp_data.config_sema);
 	return retval;
 }
@@ -95,8 +41,7 @@ zfcp_ccw_probe(struct ccw_device *ccw_device)
  * ports that belong to this adapter. And in addition all resources of this
  * adapter will be freed too.
  */
-static void
-zfcp_ccw_remove(struct ccw_device *ccw_device)
+static void zfcp_ccw_remove(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
 	struct zfcp_port *port, *p;
@@ -106,8 +51,6 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
 	down(&zfcp_data.config_sema);
 	adapter = dev_get_drvdata(&ccw_device->dev);
 
-	ZFCP_LOG_DEBUG("Removing adapter %s\n",
-		       zfcp_get_busid_by_adapter(adapter));
 	write_lock_irq(&zfcp_data.config_lock);
 	list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
 		list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
@@ -145,8 +88,7 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
  * registered with the SCSI stack, that the QDIO queues will be set up
  * and that the adapter will be opened (asynchronously).
  */
-static int
-zfcp_ccw_set_online(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
 	int retval;
@@ -155,12 +97,8 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	adapter = dev_get_drvdata(&ccw_device->dev);
 
 	retval = zfcp_erp_thread_setup(adapter);
-	if (retval) {
-		ZFCP_LOG_INFO("error: start of error recovery thread for "
-			      "adapter %s failed\n",
-			      zfcp_get_busid_by_adapter(adapter));
+	if (retval)
 		goto out;
-	}
 
 	retval = zfcp_adapter_scsi_register(adapter);
 	if (retval)
@@ -191,8 +129,7 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
  * This function gets called by the common i/o layer and sets an adapter
  * into state offline.
  */
-static int
-zfcp_ccw_set_offline(struct ccw_device *ccw_device)
+static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
 
@@ -206,15 +143,14 @@ zfcp_ccw_set_offline(struct ccw_device *ccw_device)
 }
 
 /**
- * zfcp_ccw_notify
+ * zfcp_ccw_notify - ccw notify function
  * @ccw_device: pointer to belonging ccw device
  * @event: indicates if adapter was detached or attached
  *
  * This function gets called by the common i/o layer if an adapter has gone
  * or reappeared.
  */
-static int
-zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
+static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 {
 	struct zfcp_adapter *adapter;
 
@@ -222,18 +158,15 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 	adapter = dev_get_drvdata(&ccw_device->dev);
 	switch (event) {
 	case CIO_GONE:
-		ZFCP_LOG_NORMAL("adapter %s: device gone\n",
-				zfcp_get_busid_by_adapter(adapter));
+		dev_warn(&adapter->ccw_device->dev, "device gone\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
 		break;
 	case CIO_NO_PATH:
-		ZFCP_LOG_NORMAL("adapter %s: no path\n",
-				zfcp_get_busid_by_adapter(adapter));
+		dev_warn(&adapter->ccw_device->dev, "no path\n");
 		zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
 		break;
 	case CIO_OPER:
-		ZFCP_LOG_NORMAL("adapter %s: operational again\n",
-				zfcp_get_busid_by_adapter(adapter));
+		dev_info(&adapter->ccw_device->dev, "operational again\n");
 		zfcp_erp_modify_adapter_status(adapter, 11, NULL,
 					       ZFCP_STATUS_COMMON_RUNNING,
 					       ZFCP_SET);
@@ -247,24 +180,10 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
 }
 
 /**
- * zfcp_ccw_register - ccw register function
- *
- * Registers the driver at the common i/o layer. This function will be called
- * at module load time/system start.
- */
-int __init
-zfcp_ccw_register(void)
-{
-	return ccw_driver_register(&zfcp_ccw_driver);
-}
-
-/**
- * zfcp_ccw_shutdown - gets called on reboot/shutdown
- *
- * Makes sure that QDIO queues are down when the system gets stopped.
+ * zfcp_ccw_shutdown - handle shutdown from cio
+ * @cdev: device for adapter to shutdown.
  */
-static void
-zfcp_ccw_shutdown(struct ccw_device *cdev)
+static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 {
 	struct zfcp_adapter *adapter;
 
@@ -275,4 +194,33 @@ zfcp_ccw_shutdown(struct ccw_device *cdev)
 	up(&zfcp_data.config_sema);
 }
 
-#undef ZFCP_LOG_AREA
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
+	{},
+};
+
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+static struct ccw_driver zfcp_ccw_driver = {
+	.owner       = THIS_MODULE,
+	.name        = "zfcp",
+	.ids         = zfcp_ccw_device_id,
+	.probe       = zfcp_ccw_probe,
+	.remove      = zfcp_ccw_remove,
+	.set_online  = zfcp_ccw_set_online,
+	.set_offline = zfcp_ccw_set_offline,
+	.notify      = zfcp_ccw_notify,
+	.shutdown    = zfcp_ccw_shutdown,
+};
+
+/**
+ * zfcp_ccw_register - ccw register function
+ *
+ * Registers the driver at the common i/o layer. This function will be called
+ * at module load time/system start.
+ */
+int __init zfcp_ccw_register(void)
+{
+	return ccw_driver_register(&zfcp_ccw_driver);
+}
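
For reference, the same ccw registration pattern as a self-contained module sketch. The "demo" names are invented for illustration; only interfaces visible in this patch (struct ccw_driver, CCW_DEVICE_DEVTYPE, ccw_driver_register), plus ccw_driver_unregister() for the exit path, are used:

#include <linux/module.h>
#include <asm/ccwdev.h>

static int demo_probe(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "probed\n");
	return 0;
}

static void demo_remove(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "removed\n");
}

static struct ccw_device_id demo_ids[] = {
	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
	{},
};
MODULE_DEVICE_TABLE(ccw, demo_ids);

static struct ccw_driver demo_driver = {
	.owner  = THIS_MODULE,
	.name   = "demo",	/* illustrative driver name */
	.ids    = demo_ids,
	.probe  = demo_probe,
	.remove = demo_remove,
};

static int __init demo_init(void)
{
	return ccw_driver_register(&demo_driver);
}

static void __exit demo_exit(void)
{
	ccw_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");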
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
new file mode 100644
index 000000000000..ec2abceca6dc
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -0,0 +1,259 @@
1/*
2 * zfcp device driver
3 *
4 * Userspace interface for accessing the
5 * Access Control Lists / Control File Data Channel
6 *
7 * Copyright IBM Corporation 2008
8 */
9
10#include <linux/types.h>
11#include <linux/miscdevice.h>
12#include <asm/ccwdev.h>
13#include "zfcp_def.h"
14#include "zfcp_ext.h"
15#include "zfcp_fsf.h"
16
17#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
18#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
19#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
20#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
21#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
22
23#define ZFCP_CFDC_DOWNLOAD 0x00000001
24#define ZFCP_CFDC_UPLOAD 0x00000002
25#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
26
27#define ZFCP_CFDC_IOC_MAGIC 0xDD
28#define ZFCP_CFDC_IOC \
29 _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
30
31/**
32 * struct zfcp_cfdc_data - data for ioctl cfdc interface
33 * @signature: request signature
34 * @devno: FCP adapter device number
35 * @command: command code
36 * @fsf_status: returns status of FSF command to userspace
37 * @fsf_status_qual: returned to userspace
38 * @payloads: access conflicts list
39 * @control_file: access control table
40 */
41struct zfcp_cfdc_data {
42 u32 signature;
43 u32 devno;
44 u32 command;
45 u32 fsf_status;
46 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
47 u8 payloads[256];
48 u8 control_file[0];
49};
50
51static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
52 void __user *user_buffer)
53{
54 unsigned int length;
55 unsigned int size = ZFCP_CFDC_MAX_SIZE;
56
57 while (size) {
58 length = min((unsigned int)size, sg->length);
59 if (copy_from_user(sg_virt(sg++), user_buffer, length))
60 return -EFAULT;
61 user_buffer += length;
62 size -= length;
63 }
64 return 0;
65}
66
67static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
68 struct scatterlist *sg)
69{
70 unsigned int length;
71 unsigned int size = ZFCP_CFDC_MAX_SIZE;
72
73 while (size) {
74 length = min((unsigned int) size, sg->length);
75 if (copy_to_user(user_buffer, sg_virt(sg++), length))
76 return -EFAULT;
77 user_buffer += length;
78 size -= length;
79 }
80 return 0;
81}
82
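
Both copy helpers above advance through the scatterlist one entry per iteration, bounded by ZFCP_CFDC_MAX_SIZE. The same chunked walk as a rough standalone sketch in plain userspace C (page size and counts here are assumptions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096	/* assumption: mirrors the kernel page size */
#define NPAGES  4

int main(void)
{
	char *pages[NPAGES];
	static char src[PAGE_SZ * NPAGES];
	size_t size = sizeof(src), off = 0;
	int i;

	for (i = 0; i < NPAGES; i++)
		if (!(pages[i] = malloc(PAGE_SZ)))
			return 1;

	memset(src, 0xAB, sizeof(src));
	for (i = 0; size; i++) {		/* one "sg entry" per pass */
		size_t len = size < PAGE_SZ ? size : PAGE_SZ;

		memcpy(pages[i], src + off, len);
		off += len;
		size -= len;
	}
	printf("copied %zu bytes in %d chunks\n", off, i);

	for (i = 0; i < NPAGES; i++)
		free(pages[i]);
	return 0;
}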
83static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
84{
85 struct zfcp_adapter *adapter = NULL, *cur_adapter;
86 struct ccw_dev_id dev_id;
87
88 read_lock_irq(&zfcp_data.config_lock);
89 list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) {
90 ccw_device_get_id(cur_adapter->ccw_device, &dev_id);
91 if (dev_id.devno == devno) {
92 adapter = cur_adapter;
93 zfcp_adapter_get(adapter);
94 break;
95 }
96 }
97 read_unlock_irq(&zfcp_data.config_lock);
98 return adapter;
99}
100
101static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
102{
103 switch (command) {
104 case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
105 fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
106 fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
107 break;
108 case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
109 fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
110 fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
111 break;
112 case ZFCP_CFDC_CMND_FULL_ACCESS:
113 fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
114 fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
115 break;
116 case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
117 fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
118 fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
119 break;
120 case ZFCP_CFDC_CMND_UPLOAD:
121 fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
122 fsf_cfdc->option = 0;
123 break;
124 default:
125 return -EINVAL;
126 }
127
128 return 0;
129}
130
131static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
132 u8 __user *control_file)
133{
134 int retval;
135 retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);
136 if (retval)
137 return retval;
138
139 sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;
140
141 if (command & ZFCP_CFDC_WITH_CONTROL_FILE &&
142 command & ZFCP_CFDC_DOWNLOAD) {
143 retval = zfcp_cfdc_copy_from_user(sg, control_file);
144 if (retval) {
145 zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
146 return -EFAULT;
147 }
148 }
149
150 return 0;
151}
152
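
zfcp_cfdc_sg_setup() relies on zfcp_sg_free_table(sg, i) releasing only the i pages obtained before a failed allocation. That allocate-all-or-roll-back pattern, sketched standalone:

#include <stdio.h>
#include <stdlib.h>

static void free_bufs(void **p, int n)
{
	while (n--)
		free(p[n]);
}

static int alloc_bufs(void **p, int count, size_t sz)
{
	int i;

	for (i = 0; i < count; i++) {
		p[i] = calloc(1, sz);
		if (!p[i]) {
			free_bufs(p, i);	/* roll back partial work */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *bufs[5];

	if (alloc_bufs(bufs, 5, 4096))
		return 1;
	puts("allocated 5 buffers");
	free_bufs(bufs, 5);
	return 0;
}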
153static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
154 struct zfcp_fsf_req *req)
155{
156 data->fsf_status = req->qtcb->header.fsf_status;
157 memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
158 sizeof(union fsf_status_qual));
159 memcpy(&data->payloads, &req->qtcb->bottom.support.els,
160 sizeof(req->qtcb->bottom.support.els));
161}
162
163static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
164 unsigned long buffer)
165{
166 struct zfcp_cfdc_data *data;
167 struct zfcp_cfdc_data __user *data_user;
168 struct zfcp_adapter *adapter;
169 struct zfcp_fsf_req *req;
170 struct zfcp_fsf_cfdc *fsf_cfdc;
171 int retval;
172
173 if (command != ZFCP_CFDC_IOC)
174 return -ENOTTY;
175
176 data_user = (void __user *) buffer;
177 if (!data_user)
178 return -EINVAL;
179
180 fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
181 if (!fsf_cfdc)
182 return -ENOMEM;
183
184 data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
185 if (!data) {
186 retval = -ENOMEM;
187 goto no_mem_sense;
188 }
189
190 retval = copy_from_user(data, data_user, sizeof(*data));
191 if (retval) {
192 retval = -EFAULT;
193 goto free_buffer;
194 }
195
196 if (data->signature != 0xCFDCACDF) {
197 retval = -EINVAL;
198 goto free_buffer;
199 }
200
201 retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
202
203 adapter = zfcp_cfdc_get_adapter(data->devno);
204 if (!adapter) {
205 retval = -ENXIO;
206 goto free_buffer;
207 }
208
209 retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
210 data_user->control_file);
211 if (retval)
212 goto adapter_put;
213 req = zfcp_fsf_control_file(adapter, fsf_cfdc);
214 if (IS_ERR(req)) {
215 retval = PTR_ERR(req);
216 goto free_sg;
217 }
218
219 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
220 retval = -ENXIO;
221 goto free_fsf;
222 }
223
224 zfcp_cfdc_req_to_sense(data, req);
225 retval = copy_to_user(data_user, data, sizeof(*data_user));
226 if (retval) {
227 retval = -EFAULT;
228 goto free_fsf;
229 }
230
231 if (data->command & ZFCP_CFDC_UPLOAD)
232 retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
233 fsf_cfdc->sg);
234
235 free_fsf:
236 zfcp_fsf_req_free(req);
237 free_sg:
238 zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
239 adapter_put:
240 zfcp_adapter_put(adapter);
241 free_buffer:
242 kfree(data);
243 no_mem_sense:
244 kfree(fsf_cfdc);
245 return retval;
246}
247
248static const struct file_operations zfcp_cfdc_fops = {
249 .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
250#ifdef CONFIG_COMPAT
251 .compat_ioctl = zfcp_cfdc_dev_ioctl
252#endif
253};
254
255struct miscdevice zfcp_cfdc_misc = {
256 .minor = MISC_DYNAMIC_MINOR,
257 .name = "zfcp_cfdc",
258 .fops = &zfcp_cfdc_fops,
259};
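
From userspace, the interface is a single _IOWR ioctl on the misc device. A rough sketch of a caller — the struct layout is copied from above, FSF_STATUS_QUALIFIER_SIZE is assumed to be 16 (see zfcp_fsf.h), and the device number is a placeholder:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define FSF_STATUS_QUALIFIER_SIZE 16	/* assumption */

struct zfcp_cfdc_data {
	uint32_t signature;
	uint32_t devno;
	uint32_t command;
	uint32_t fsf_status;
	uint8_t  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
	uint8_t  payloads[256];
	uint8_t  control_file[0];
};

#define ZFCP_CFDC_IOC _IOWR(0xDD, 0, struct zfcp_cfdc_data)
#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201

int main(void)
{
	struct zfcp_cfdc_data data;
	int fd = open("/dev/zfcp_cfdc", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&data, 0, sizeof(data));
	data.signature = 0xCFDCACDF;	/* checked by the driver */
	data.devno = 0x1234;		/* placeholder adapter devno */
	data.command = ZFCP_CFDC_CMND_FULL_ACCESS;

	if (ioctl(fd, ZFCP_CFDC_IOC, &data) < 0)
		perror("ioctl");
	else
		printf("fsf_status 0x%08x\n", data.fsf_status);
	close(fd);
	return 0;
}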
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index c8bad675dbd1..fca48b88fc53 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -1,22 +1,9 @@
-/*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
- *
- * (C) Copyright IBM Corp. 2002, 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+/*
+ * zfcp device driver
+ *
+ * Debug traces for zfcp.
+ *
+ * Copyright IBM Corporation 2002, 2008
+ */
 
 #include <linux/ctype.h>
@@ -29,8 +16,6 @@ module_param(dbfsize, uint, 0400);
 MODULE_PARM_DESC(dbfsize,
 		 "number of pages for each debug feature area (default 4)");
 
-#define ZFCP_LOG_AREA			ZFCP_LOG_AREA_OTHER
-
 static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
 			     int level, char *from, int from_len)
 {
@@ -186,8 +171,8 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
 			fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
 	response->fsf_req_status = fsf_req->status;
 	response->sbal_first = fsf_req->sbal_first;
-	response->sbal_curr = fsf_req->sbal_curr;
 	response->sbal_last = fsf_req->sbal_last;
+	response->sbal_response = fsf_req->sbal_response;
 	response->pool = fsf_req->pool != NULL;
 	response->erp_action = (unsigned long)fsf_req->erp_action;
 
@@ -268,7 +253,7 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
 	strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
 	strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
 
-	rec->u.status.failed = adapter->status_read_failed;
+	rec->u.status.failed = atomic_read(&adapter->stat_miss);
 	if (status_buffer != NULL) {
 		rec->u.status.status_type = status_buffer->status_type;
 		rec->u.status.status_subtype = status_buffer->status_subtype;
@@ -312,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
 /**
  * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
  * @adapter: adapter affected by this QDIO related event
- * @status: as passed by qdio module
  * @qdio_error: as passed by qdio module
- * @siga_error: as passed by qdio module
  * @sbal_index: first buffer with error condition, as passed by qdio module
  * @sbal_count: number of buffers affected, as passed by qdio module
  */
-void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
-			     unsigned int qdio_error, unsigned int siga_error,
-			     int sbal_index, int sbal_count)
+void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
+			     unsigned int qdio_error, int sbal_index,
+			     int sbal_count)
 {
 	struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
 	unsigned long flags;
@@ -328,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
 	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
-	r->u.qdio.status = status;
 	r->u.qdio.qdio_error = qdio_error;
-	r->u.qdio.siga_error = siga_error;
 	r->u.qdio.sbal_index = sbal_index;
 	r->u.qdio.sbal_count = sbal_count;
 	debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
@@ -355,8 +336,8 @@ static void zfcp_hba_dbf_view_response(char **p,
 		     FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
 	zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
 	zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
-	zfcp_dbf_out(p, "sbal_curr", "0x%02x", r->sbal_curr);
 	zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
+	zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
 	zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
 
 	switch (r->fsf_command) {
@@ -413,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p,
 
 static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
 {
-	zfcp_dbf_out(p, "status", "0x%08x", r->status);
 	zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
-	zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error);
 	zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
 	zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
 }
@@ -515,13 +494,13 @@ static const char *zfcp_rec_dbf_ids[] = {
 	[52] = "port boxed close unit",
 	[53] = "port boxed fcp",
 	[54] = "unit boxed fcp",
-	[55] = "port access denied ct",
-	[56] = "port access denied els",
-	[57] = "port access denied open port",
-	[58] = "port access denied close physical",
-	[59] = "unit access denied open unit",
+	[55] = "port access denied",
+	[56] = "",
+	[57] = "",
+	[58] = "",
+	[59] = "unit access denied",
 	[60] = "shared unit access denied open unit",
-	[61] = "unit access denied fcp",
+	[61] = "",
 	[62] = "request timeout",
 	[63] = "adisc link test reject or timeout",
 	[64] = "adisc link test d_id changed",
@@ -546,8 +525,8 @@ static const char *zfcp_rec_dbf_ids[] = {
 	[80] = "exclusive read-only unit access unsupported",
 	[81] = "shared read-write unit access unsupported",
 	[82] = "incoming rscn",
-	[83] = "incoming plogi",
-	[84] = "incoming logo",
+	[83] = "incoming wwpn",
+	[84] = "",
 	[85] = "online",
 	[86] = "offline",
 	[87] = "ccw device gone",
@@ -586,8 +565,8 @@ static const char *zfcp_rec_dbf_ids[] = {
 	[120] = "unknown fsf command",
 	[121] = "no recommendation for status qualifier",
 	[122] = "status read physical port closed in error",
-	[123] = "fc service class not supported ct",
-	[124] = "fc service class not supported els",
+	[123] = "fc service class not supported",
+	[124] = "",
 	[125] = "need newer zfcp",
 	[126] = "need newer microcode",
 	[127] = "arbitrated loop not supported",
@@ -595,7 +574,7 @@ static const char *zfcp_rec_dbf_ids[] = {
 	[129] = "qtcb size mismatch",
 	[130] = "unknown fsf status ecd",
 	[131] = "fcp request too big",
-	[132] = "fc service class not supported fcp",
+	[132] = "",
 	[133] = "data direction not valid fcp",
 	[134] = "command length not valid fcp",
 	[135] = "status read act update",
@@ -603,13 +582,18 @@ static const char *zfcp_rec_dbf_ids[] = {
 	[137] = "hbaapi port open",
 	[138] = "hbaapi unit open",
 	[139] = "hbaapi unit shutdown",
-	[140] = "qdio error",
+	[140] = "qdio error outbound",
 	[141] = "scsi host reset",
 	[142] = "dismissing fsf request for recovery action",
 	[143] = "recovery action timed out",
 	[144] = "recovery action gone",
 	[145] = "recovery action being processed",
 	[146] = "recovery action ready for next step",
+	[147] = "qdio error inbound",
+	[148] = "nameserver needed for port scan",
+	[149] = "port scan",
+	[150] = "ptp attach",
+	[151] = "port validation failed",
 };
 
 static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
@@ -670,24 +654,20 @@ static struct debug_view zfcp_rec_dbf_view = {
  * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
  * @id2: identifier for event
  * @adapter: adapter
- * @lock: non-zero value indicates that erp_lock has not yet been acquired
+ * This function assumes that the caller is holding erp_lock.
  */
-void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
+void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
 {
 	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
 	unsigned long flags = 0;
 	struct list_head *entry;
 	unsigned ready = 0, running = 0, total;
 
-	if (lock)
-		read_lock_irqsave(&adapter->erp_lock, flags);
 	list_for_each(entry, &adapter->erp_ready_head)
 		ready++;
 	list_for_each(entry, &adapter->erp_running_head)
 		running++;
 	total = adapter->erp_total_count;
-	if (lock)
-		read_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
 	memset(r, 0, sizeof(*r));
@@ -696,10 +676,25 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
 	r->u.thread.total = total;
 	r->u.thread.ready = ready;
 	r->u.thread.running = running;
-	debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
+	debug_event(adapter->rec_dbf, 6, r, sizeof(*r));
 	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
 }
 
+/**
+ * zfcp_rec_dbf_event_thread_lock - trace event related to recovery thread
+ * @id2: identifier for event
+ * @adapter: adapter
+ * This function assumes that the caller does not hold erp_lock.
+ */
+void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_rec_dbf_event_thread(id2, adapter);
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
 				      struct zfcp_adapter *adapter,
 				      atomic_t *status, atomic_t *erp_count,
@@ -823,7 +818,7 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
 	r->u.action.status = erp_action->status;
 	r->u.action.step = erp_action->step;
 	r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
-	debug_event(adapter->rec_dbf, 4, r, sizeof(*r));
+	debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
 	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
 }
 
@@ -960,7 +955,7 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
 
 	zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id,
 			       fc_host_port_id(adapter->scsi_host),
-			       *(u8 *)buf->payload, (void *)buf->payload,
+			       buf->payload.data[0], (void *)buf->payload.data,
 			       length);
 }
 
@@ -1064,8 +1059,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
 	if (fsf_req != NULL) {
 		fcp_rsp = (struct fcp_rsp_iu *)
 			  &(fsf_req->qtcb->bottom.io.fcp_rsp);
-		fcp_rsp_info =
-			zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
+		fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
 		fcp_sns_info =
 			zfcp_get_fcp_sns_info_ptr(fcp_rsp);
 
@@ -1279,5 +1273,3 @@ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
 	adapter->hba_dbf = NULL;
 	adapter->rec_dbf = NULL;
 }
-
-#undef ZFCP_LOG_AREA
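
The level arguments to debug_event() rise here (4 to 5, 5 to 6) so recovery-thread noise falls below the default trace level. A minimal module sketch of the s390 debug-feature calls involved; the signatures are stated as I understand them from asm/debug.h, so treat them as assumptions:

#include <linux/module.h>
#include <asm/debug.h>

static debug_info_t *demo_dbf;

static int __init demo_init(void)
{
	char hi[] = "hi", lo[] = "lo";

	/* 4 pages per area, 1 area, 16-byte entries */
	demo_dbf = debug_register("demo_dbf", 4, 1, 16);
	if (!demo_dbf)
		return -ENOMEM;
	debug_register_view(demo_dbf, &debug_hex_ascii_view);
	debug_set_level(demo_dbf, 3);	/* keep events with level <= 3 */

	debug_event(demo_dbf, 2, hi, sizeof(hi));	/* recorded */
	debug_event(demo_dbf, 6, lo, sizeof(lo));	/* filtered out */
	return 0;
}

static void __exit demo_exit(void)
{
	debug_unregister(demo_dbf);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");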
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 54c34e483457..0ddb18449d11 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -38,7 +38,7 @@ struct zfcp_rec_dbf_record_thread {
 	u32 total;
 	u32 ready;
 	u32 running;
-} __attribute__ ((packed));
+};
 
 struct zfcp_rec_dbf_record_target {
 	u64 ref;
@@ -47,7 +47,7 @@ struct zfcp_rec_dbf_record_target {
 	u64 wwpn;
 	u64 fcp_lun;
 	u32 erp_count;
-} __attribute__ ((packed));
+};
 
 struct zfcp_rec_dbf_record_trigger {
 	u8 want;
@@ -59,14 +59,14 @@ struct zfcp_rec_dbf_record_trigger {
 	u64 action;
 	u64 wwpn;
 	u64 fcp_lun;
-} __attribute__ ((packed));
+};
 
 struct zfcp_rec_dbf_record_action {
 	u32 status;
 	u32 step;
 	u64 action;
 	u64 fsf_req;
-} __attribute__ ((packed));
+};
 
 struct zfcp_rec_dbf_record {
 	u8 id;
@@ -77,7 +77,7 @@ struct zfcp_rec_dbf_record {
 		struct zfcp_rec_dbf_record_target target;
 		struct zfcp_rec_dbf_record_trigger trigger;
 	} u;
-} __attribute__ ((packed));
+};
 
 enum {
 	ZFCP_REC_DBF_ID_ACTION,
@@ -97,8 +97,8 @@ struct zfcp_hba_dbf_record_response {
 	u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
 	u32 fsf_req_status;
 	u8 sbal_first;
-	u8 sbal_curr;
 	u8 sbal_last;
+	u8 sbal_response;
 	u8 pool;
 	u64 erp_action;
 	union {
@@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status {
 } __attribute__ ((packed));
 
 struct zfcp_hba_dbf_record_qdio {
-	u32 status;
 	u32 qdio_error;
-	u32 siga_error;
 	u8 sbal_index;
 	u8 sbal_count;
 } __attribute__ ((packed));
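
Dropping __attribute__((packed)) is safe for these records because every member is naturally aligned, so the unpacked layout is identical while avoiding the unaligned-access penalty packing can impose elsewhere. A standalone check mirroring zfcp_rec_dbf_record_thread:

#include <stdio.h>
#include <stdint.h>

struct rec_thread {
	uint32_t total;
	uint32_t ready;
	uint32_t running;
};

struct rec_thread_packed {
	uint32_t total;
	uint32_t ready;
	uint32_t running;
} __attribute__((packed));

int main(void)
{
	/* both print 12: no padding exists to suppress */
	printf("%zu %zu\n", sizeof(struct rec_thread),
	       sizeof(struct rec_thread_packed));
	return 0;
}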
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index bda8c77b22da..67f45fc62f53 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -1,22 +1,9 @@
-/*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
- *
- * (C) Copyright IBM Corp. 2002, 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+/*
+ * zfcp device driver
+ *
+ * Global definitions for the zfcp device driver.
+ *
+ * Copyright IBM Corporation 2002, 2008
+ */
 
 #ifndef ZFCP_DEF_H
@@ -26,7 +13,6 @@
 
 #include <linux/init.h>
 #include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
 #include <linux/major.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -53,9 +39,6 @@
 
 /********************* GENERAL DEFINES *********************************/
 
-/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION		"4.8.0"
-
 /**
  * zfcp_sg_to_address - determine kernel address from struct scatterlist
  * @list: struct scatterlist
@@ -93,11 +76,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
 #define ZFCP_DEVICE_MODEL	0x03
 #define ZFCP_DEVICE_MODEL_PRIV	0x04
 
-/* allow as many chained SBALs as are supported by hardware */
-#define ZFCP_MAX_SBALS_PER_REQ		FSF_MAX_SBALS_PER_REQ
-#define ZFCP_MAX_SBALS_PER_CT_REQ	FSF_MAX_SBALS_PER_REQ
-#define ZFCP_MAX_SBALS_PER_ELS_REQ	FSF_MAX_SBALS_PER_ELS_REQ
-
 /* DMQ bug workaround: don't use last SBALE */
 #define ZFCP_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
 
@@ -106,42 +84,17 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
 
 /* max. number of (data buffer) SBALEs in largest SBAL chain */
 #define ZFCP_MAX_SBALES_PER_REQ		\
-	(ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
+	(FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
 	/* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
 
 #define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
 	/* max. number of (data buffer) SBALEs in largest SBAL chain
 	   multiplied with number of sectors per 4k block */
 
-/* FIXME(tune): free space should be one max. SBAL chain plus what? */
-#define ZFCP_QDIO_PCI_INTERVAL		(QDIO_MAX_BUFFERS_PER_Q \
-					 - (ZFCP_MAX_SBALS_PER_REQ + 4))
-
-#define ZFCP_SBAL_TIMEOUT		(5*HZ)
-
-#define ZFCP_TYPE2_RECOVERY_TIME	8	/* seconds */
-
-/* queue polling (values in microseconds) */
-#define ZFCP_MAX_INPUT_THRESHOLD	5000	/* FIXME: tune */
-#define ZFCP_MAX_OUTPUT_THRESHOLD	1000	/* FIXME: tune */
-#define ZFCP_MIN_INPUT_THRESHOLD	1	/* ignored by QDIO layer */
-#define ZFCP_MIN_OUTPUT_THRESHOLD	1	/* ignored by QDIO layer */
-
-#define QDIO_SCSI_QFMT			1	/* 1 for FSF */
-#define QBUFF_PER_PAGE			(PAGE_SIZE / sizeof(struct qdio_buffer))
-
 /********************* FSF SPECIFIC DEFINES *********************************/
 
-#define ZFCP_ULP_INFO_VERSION		26
-#define ZFCP_QTCB_VERSION	FSF_QTCB_CURRENT_VERSION
 /* ATTENTION: value must not be used by hardware */
 #define FSF_QTCB_UNSOLICITED_STATUS		0x6305
-#define ZFCP_STATUS_READ_FAILED_THRESHOLD	3
-#define ZFCP_STATUS_READS_RECOM		FSF_STATUS_READS_RECOM
-
-/* Do 1st retry in 1 second, then double the timeout for each following retry */
-#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP	1
-#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES	7
 
 /* timeout value for "default timer" for fsf requests */
 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
@@ -153,17 +106,9 @@ typedef unsigned long long fcp_lun_t;
 /* data length field may be at variable position in FCP-2 FCP_CMND IU */
 typedef unsigned int	fcp_dl_t;
 
-#define ZFCP_FC_SERVICE_CLASS_DEFAULT	FSF_CLASS_3
-
 /* timeout for name-server lookup (in seconds) */
 #define ZFCP_NS_GID_PN_TIMEOUT 10
 
-/* largest SCSI command we can process */
-/* FCP-2 (FCP_CMND IU) allows up to (255-3+16) */
-#define ZFCP_MAX_SCSI_CMND_LENGTH	255
-/* maximum number of commands in LUN queue (tagged queueing) */
-#define ZFCP_CMND_PER_LUN		32
-
 /* task attribute values in FCP-2 FCP_CMND IU */
 #define SIMPLE_Q	0
 #define HEAD_OF_Q	1
@@ -224,9 +169,9 @@ struct fcp_rsp_iu {
 #define RSP_CODE_TASKMAN_FAILED		5
 
 /* see fc-fs */
-#define LS_RSCN  0x61040000
-#define LS_LOGO  0x05000000
-#define LS_PLOGI 0x03000000
+#define LS_RSCN  0x61
+#define LS_LOGO  0x05
+#define LS_PLOGI 0x03
 
 struct fcp_rscn_head {
 	u8  command;
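
Shrinking LS_RSCN/LS_LOGO/LS_PLOGI from a word to one byte matches how incoming ELS payloads are now classified by their first byte (compare the buf->payload.data[0] change in zfcp_dbf.c above). A trivial standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define LS_PLOGI 0x03

int main(void)
{
	uint8_t payload[8] = { 0x03 };	/* first byte = ELS command */

	if (payload[0] == LS_PLOGI)
		printf("incoming PLOGI\n");
	return 0;
}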
@@ -266,7 +211,6 @@ struct fcp_logo {
  * FC-FS stuff
  */
 #define R_A_TOV		10 /* seconds */
-#define ZFCP_ELS_TIMEOUT	(2 * R_A_TOV)
 
 #define ZFCP_LS_RLS 0x0f
 #define ZFCP_LS_ADISC 0x52
@@ -311,7 +255,10 @@ struct zfcp_rc_entry {
 #define ZFCP_CT_DIRECTORY_SERVICE	0xFC
 #define ZFCP_CT_NAME_SERVER		0x02
 #define ZFCP_CT_SYNCHRONOUS		0x00
+#define ZFCP_CT_SCSI_FCP		0x08
+#define ZFCP_CT_UNABLE_TO_PERFORM_CMD	0x09
 #define ZFCP_CT_GID_PN			0x0121
+#define ZFCP_CT_GPN_FT			0x0172
 #define ZFCP_CT_MAX_SIZE		0x1020
 #define ZFCP_CT_ACCEPT			0x8002
 #define ZFCP_CT_REJECT			0x8001
@@ -321,107 +268,6 @@ struct zfcp_rc_entry {
  */
 #define ZFCP_CT_TIMEOUT			(3 * R_A_TOV)
 
324/******************** LOGGING MACROS AND DEFINES *****************************/
325
326/*
327 * Logging may be applied on certain kinds of driver operations
328 * independently. Additionally, different log-levels are supported for
329 * each of these areas.
330 */
331
332#define ZFCP_NAME "zfcp"
333
334/* independent log areas */
335#define ZFCP_LOG_AREA_OTHER 0
336#define ZFCP_LOG_AREA_SCSI 1
337#define ZFCP_LOG_AREA_FSF 2
338#define ZFCP_LOG_AREA_CONFIG 3
339#define ZFCP_LOG_AREA_CIO 4
340#define ZFCP_LOG_AREA_QDIO 5
341#define ZFCP_LOG_AREA_ERP 6
342#define ZFCP_LOG_AREA_FC 7
343
344/* log level values*/
345#define ZFCP_LOG_LEVEL_NORMAL 0
346#define ZFCP_LOG_LEVEL_INFO 1
347#define ZFCP_LOG_LEVEL_DEBUG 2
348#define ZFCP_LOG_LEVEL_TRACE 3
349
350/*
351 * this allows removal of logging code by the preprocessor
352 * (the most detailed log level still to be compiled in is specified,
353 * higher log levels are removed)
354 */
355#define ZFCP_LOG_LEVEL_LIMIT ZFCP_LOG_LEVEL_TRACE
356
357/* get "loglevel" nibble assignment */
358#define ZFCP_GET_LOG_VALUE(zfcp_lognibble) \
359 ((atomic_read(&zfcp_data.loglevel) >> (zfcp_lognibble<<2)) & 0xF)
360
361/* set "loglevel" nibble */
362#define ZFCP_SET_LOG_NIBBLE(value, zfcp_lognibble) \
363 (value << (zfcp_lognibble << 2))
364
365/* all log-level defaults are combined to generate initial log-level */
366#define ZFCP_LOG_LEVEL_DEFAULTS \
367 (ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_OTHER) | \
368 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_SCSI) | \
369 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FSF) | \
370 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CONFIG) | \
371 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CIO) | \
372 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_QDIO) | \
373 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_ERP) | \
374 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FC))
375
376/* check whether we have the right level for logging */
377#define ZFCP_LOG_CHECK(level) \
378 ((ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA)) >= level)
379
380/* logging routine for zfcp */
381#define _ZFCP_LOG(fmt, args...) \
382 printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
383 __LINE__ , ##args)
384
385#define ZFCP_LOG(level, fmt, args...) \
386do { \
387 if (ZFCP_LOG_CHECK(level)) \
388 _ZFCP_LOG(fmt, ##args); \
389} while (0)
390
391#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
392# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0)
393#else
394# define ZFCP_LOG_NORMAL(fmt, args...) \
395do { \
396 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_NORMAL)) \
397 printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
398} while (0)
399#endif
400
401#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
402# define ZFCP_LOG_INFO(fmt, args...) do { } while (0)
403#else
404# define ZFCP_LOG_INFO(fmt, args...) \
405do { \
406 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_INFO)) \
407 printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
408} while (0)
409#endif
410
411#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
412# define ZFCP_LOG_DEBUG(fmt, args...) do { } while (0)
413#else
414# define ZFCP_LOG_DEBUG(fmt, args...) \
415 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
416#endif
417
418#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
419# define ZFCP_LOG_TRACE(fmt, args...) do { } while (0)
420#else
421# define ZFCP_LOG_TRACE(fmt, args...) \
422 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
423#endif
424
 /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
 
 /*
@@ -441,6 +287,7 @@ do { \
 #define ZFCP_STATUS_COMMON_ERP_INUSE		0x01000000
 #define ZFCP_STATUS_COMMON_ACCESS_DENIED	0x00800000
 #define ZFCP_STATUS_COMMON_ACCESS_BOXED		0x00400000
+#define ZFCP_STATUS_COMMON_NOESC		0x00200000
 
 /* adapter status */
 #define ZFCP_STATUS_ADAPTER_QDIOUP		0x00000002
@@ -496,77 +343,6 @@ do { \
 #define ZFCP_STATUS_FSFREQ_RETRY		0x00000800
 #define ZFCP_STATUS_FSFREQ_DISMISSED		0x00001000
 
499/*********************** ERROR RECOVERY PROCEDURE DEFINES ********************/
500
501#define ZFCP_MAX_ERPS 3
502
503#define ZFCP_ERP_FSFREQ_TIMEOUT (30 * HZ)
504#define ZFCP_ERP_MEMWAIT_TIMEOUT HZ
505
506#define ZFCP_STATUS_ERP_TIMEDOUT 0x10000000
507#define ZFCP_STATUS_ERP_CLOSE_ONLY 0x01000000
508#define ZFCP_STATUS_ERP_DISMISSING 0x00100000
509#define ZFCP_STATUS_ERP_DISMISSED 0x00200000
510#define ZFCP_STATUS_ERP_LOWMEM 0x00400000
511
512#define ZFCP_ERP_STEP_UNINITIALIZED 0x00000000
513#define ZFCP_ERP_STEP_FSF_XCONFIG 0x00000001
514#define ZFCP_ERP_STEP_PHYS_PORT_CLOSING 0x00000010
515#define ZFCP_ERP_STEP_PORT_CLOSING 0x00000100
516#define ZFCP_ERP_STEP_NAMESERVER_OPEN 0x00000200
517#define ZFCP_ERP_STEP_NAMESERVER_LOOKUP 0x00000400
518#define ZFCP_ERP_STEP_PORT_OPENING 0x00000800
519#define ZFCP_ERP_STEP_UNIT_CLOSING 0x00001000
520#define ZFCP_ERP_STEP_UNIT_OPENING 0x00002000
521
522/* Ordered by escalation level (necessary for proper erp-code operation) */
523#define ZFCP_ERP_ACTION_REOPEN_ADAPTER 0x4
524#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED 0x3
525#define ZFCP_ERP_ACTION_REOPEN_PORT 0x2
526#define ZFCP_ERP_ACTION_REOPEN_UNIT 0x1
527
528#define ZFCP_ERP_ACTION_RUNNING 0x1
529#define ZFCP_ERP_ACTION_READY 0x2
530
531#define ZFCP_ERP_SUCCEEDED 0x0
532#define ZFCP_ERP_FAILED 0x1
533#define ZFCP_ERP_CONTINUES 0x2
534#define ZFCP_ERP_EXIT 0x3
535#define ZFCP_ERP_DISMISSED 0x4
536#define ZFCP_ERP_NOMEM 0x5
537
538
539/******************** CFDC SPECIFIC STUFF *****************************/
540
541/* Firewall data channel sense data record */
542struct zfcp_cfdc_sense_data {
543 u32 signature; /* Request signature */
544 u32 devno; /* FCP adapter device number */
545 u32 command; /* Command code */
546 u32 fsf_status; /* FSF request status and status qualifier */
547 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
548 u8 payloads[256]; /* Access conflicts list */
549 u8 control_file[0]; /* Access control table */
550};
551
552#define ZFCP_CFDC_SIGNATURE 0xCFDCACDF
553
554#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
555#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
556#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
557#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
558#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
559
560#define ZFCP_CFDC_DOWNLOAD 0x00000001
561#define ZFCP_CFDC_UPLOAD 0x00000002
562#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
563
564#define ZFCP_CFDC_DEV_NAME "zfcp_cfdc"
565#define ZFCP_CFDC_DEV_MAJOR MISC_MAJOR
566#define ZFCP_CFDC_DEV_MINOR MISC_DYNAMIC_MINOR
567
568#define ZFCP_CFDC_MAX_CONTROL_FILE_SIZE 127 * 1024
569
 /************************* STRUCTURE DEFINITIONS *****************************/
 
 struct zfcp_fsf_req;
@@ -623,7 +399,6 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
  * @resp_count: number of elements in response scatter-gather list
  * @handler: handler function (called for response to the request)
  * @handler_data: data passed to handler function
- * @pool: pointer to memory pool for ct request structure
  * @timeout: FSF timeout for this request
  * @completion: completion for synchronization purposes
  * @status: used to pass error status to calling function
@@ -636,7 +411,6 @@ struct zfcp_send_ct {
 	unsigned int resp_count;
 	zfcp_send_ct_handler_t handler;
 	unsigned long handler_data;
-	mempool_t *pool;
 	int timeout;
 	struct completion *completion;
 	int status;
@@ -685,13 +459,13 @@ struct zfcp_send_els {
 };
 
 struct zfcp_qdio_queue {
-	struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
-	u8		   free_index;	/* index of next free bfr
-					   in queue (free_count>0) */
-	atomic_t	   free_count;	/* number of free buffers
-					   in queue */
-	rwlock_t	   queue_lock;	/* lock for operations on queue */
-	int		   distance_from_int; /* SBALs used since PCI indication
-						 was last set */
+	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
+	u8		   first;	/* index of next free bfr
+					   in queue (free_count>0) */
+	atomic_t	   count;	/* number of free buffers
+					   in queue */
+	spinlock_t	   lock;	/* lock for operations on queue */
+	int		   pci_batch;	/* SBALs since PCI indication
+					   was last set */
 };
 
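
first and count describe a ring of SBALs: a request consumes count and advances first modulo the queue size. A standalone sketch of that arithmetic (QDIO_MAX_BUFFERS_PER_Q is assumed to be 128):

#include <stdio.h>

#define QMAX 128	/* assumption: QDIO_MAX_BUFFERS_PER_Q */

struct ring {
	unsigned char first;	/* index of next free buffer */
	int count;		/* number of free buffers */
};

static int ring_get(struct ring *q, int needed)
{
	int idx;

	if (q->count < needed)
		return -1;	/* caller must wait for completions */
	idx = q->first;
	q->first = (q->first + needed) % QMAX;
	q->count -= needed;
	return idx;
}

int main(void)
{
	struct ring q = { .first = 126, .count = QMAX };
	int idx = ring_get(&q, 4);

	printf("got SBALs at %d, wrapped to %u, %d left\n",
	       idx, q.first, q.count);
	return 0;
}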
@@ -708,6 +482,24 @@ struct zfcp_erp_action {
708 struct timer_list timer; 482 struct timer_list timer;
709}; 483};
710 484
485struct fsf_latency_record {
486 u32 min;
487 u32 max;
488 u64 sum;
489};
490
491struct latency_cont {
492 struct fsf_latency_record channel;
493 struct fsf_latency_record fabric;
494 u64 counter;
495};
496
497struct zfcp_latencies {
498 struct latency_cont read;
499 struct latency_cont write;
500 struct latency_cont cmd;
501 spinlock_t lock;
502};
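The latency structs above suggest a min/max/sum accumulator per I/O
class. A sketch of one update under the container lock, assuming the
caller has already split the measured time into channel and fabric
parts; the helper itself is hypothetical:

static void sketch_latency_update(struct zfcp_latencies *lat,
                                  struct latency_cont *cont,
                                  u32 channel, u32 fabric)
{
        spin_lock(&lat->lock);
        cont->channel.min = min(cont->channel.min, channel);
        cont->channel.max = max(cont->channel.max, channel);
        cont->channel.sum += channel;
        cont->fabric.min = min(cont->fabric.min, fabric);
        cont->fabric.max = max(cont->fabric.max, fabric);
        cont->fabric.sum += fabric;
        cont->counter++;
        spin_unlock(&lat->lock);
}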
711 503
712struct zfcp_adapter { 504struct zfcp_adapter {
713 struct list_head list; /* list of adapters */ 505 struct list_head list; /* list of adapters */
@@ -723,24 +515,25 @@ struct zfcp_adapter {
723 u32 adapter_features; /* FCP channel features */ 515 u32 adapter_features; /* FCP channel features */
724 u32 connection_features; /* host connection features */ 516 u32 connection_features; /* host connection features */
725 u32 hardware_version; /* of FCP channel */ 517 u32 hardware_version; /* of FCP channel */
518 u16 timer_ticks; /* time int for a tick */
726 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 519 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
727 struct list_head port_list_head; /* remote port list */ 520 struct list_head port_list_head; /* remote port list */
728 struct list_head port_remove_lh; /* head of ports to be 521 struct list_head port_remove_lh; /* head of ports to be
729 removed */ 522 removed */
730 u32 ports; /* number of remote ports */ 523 u32 ports; /* number of remote ports */
731 atomic_t reqs_active; /* # active FSF reqs */
732 unsigned long req_no; /* unique FSF req number */ 524 unsigned long req_no; /* unique FSF req number */
733 struct list_head *req_list; /* list of pending reqs */ 525 struct list_head *req_list; /* list of pending reqs */
734 spinlock_t req_list_lock; /* request list lock */ 526 spinlock_t req_list_lock; /* request list lock */
735 struct zfcp_qdio_queue request_queue; /* request queue */ 527 struct zfcp_qdio_queue req_q; /* request queue */
736 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 528 u32 fsf_req_seq_no; /* FSF cmnd seq number */
737 wait_queue_head_t request_wq; /* can be used to wait for 529 wait_queue_head_t request_wq; /* can be used to wait for
738 more available SBALs */ 530 more available SBALs */
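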
739 struct zfcp_qdio_queue response_queue; /* response queue */ 531 struct zfcp_qdio_queue resp_q; /* response queue */
740 rwlock_t abort_lock; /* Protects against SCSI 532 rwlock_t abort_lock; /* Protects against SCSI
741 stack abort/command 533 stack abort/command
742 completion races */ 534 completion races */
743 u16 status_read_failed; /* # failed status reads */ 535 atomic_t stat_miss; /* # missing status reads*/
536 struct work_struct stat_work;
744 atomic_t status; /* status of this adapter */ 537 atomic_t status; /* status of this adapter */
745 struct list_head erp_ready_head; /* error recovery for this 538 struct list_head erp_ready_head; /* error recovery for this
746 adapter/devices */ 539 adapter/devices */
@@ -774,13 +567,9 @@ struct zfcp_adapter {
774 struct fc_host_statistics *fc_stats; 567 struct fc_host_statistics *fc_stats;
775 struct fsf_qtcb_bottom_port *stats_reset_data; 568 struct fsf_qtcb_bottom_port *stats_reset_data;
776 unsigned long stats_reset; 569 unsigned long stats_reset;
570 struct work_struct scan_work;
777}; 571};
778 572
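The new stat_miss counter and stat_work member replace the old
status_read_failed field: misses can be counted in the interrupt path
and the status read buffers refilled later from process context. A
sketch under that assumption; the helper is hypothetical:

static void sketch_status_read_miss(struct zfcp_adapter *adapter)
{
        atomic_inc(&adapter->stat_miss);
        schedule_work(&adapter->stat_work);     /* refill in process context */
}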
779/*
780 * the struct device sysfs_device must be at the beginning of this structure.
781 * pointer to struct device is used to free port structure in release function
782 * of the device. don't change!
783 */
784struct zfcp_port { 573struct zfcp_port {
785 struct device sysfs_device; /* sysfs device */ 574 struct device sysfs_device; /* sysfs device */
786 struct fc_rport *rport; /* rport of fc transport class */ 575 struct fc_rport *rport; /* rport of fc transport class */
@@ -804,10 +593,6 @@ struct zfcp_port {
804 u32 supported_classes; 593 u32 supported_classes;
805}; 594};
806 595
807/* the struct device sysfs_device must be at the beginning of this structure.
808 * pointer to struct device is used to free unit structure in release function
809 * of the device. don't change!
810 */
811struct zfcp_unit { 596struct zfcp_unit {
812 struct device sysfs_device; /* sysfs device */ 597 struct device sysfs_device; /* sysfs device */
813 struct list_head list; /* list of logical units */ 598 struct list_head list; /* list of logical units */
@@ -822,6 +607,7 @@ struct zfcp_unit {
822 struct scsi_device *device; /* scsi device struct pointer */ 607 struct scsi_device *device; /* scsi device struct pointer */
823 struct zfcp_erp_action erp_action; /* pending error recovery */ 608 struct zfcp_erp_action erp_action; /* pending error recovery */
824 atomic_t erp_counter; 609 atomic_t erp_counter;
610 struct zfcp_latencies latencies;
825}; 611};
826 612
827/* FSF request */ 613/* FSF request */
@@ -831,19 +617,19 @@ struct zfcp_fsf_req {
831 struct zfcp_adapter *adapter; /* adapter request belongs to */ 617 struct zfcp_adapter *adapter; /* adapter request belongs to */
832 u8 sbal_number; /* nr of SBALs free for use */ 618 u8 sbal_number; /* nr of SBALs free for use */
833 u8 sbal_first; /* first SBAL for this request */ 619 u8 sbal_first; /* first SBAL for this request */
834 u8 sbal_last; /* last possible SBAL for 620 u8 sbal_last; /* last SBAL for this request */
621 u8 sbal_limit; /* last possible SBAL for
835 this request */ 622 this request */
836 u8 sbal_curr; /* current SBAL during creation
837 of request */
838 u8 sbale_curr; /* current SBALE during creation 623 u8 sbale_curr; /* current SBALE during creation
839 of request */ 624 of request */
625 u8 sbal_response; /* SBAL used in interrupt */
840 wait_queue_head_t completion_wq; /* can be used by a routine 626 wait_queue_head_t completion_wq; /* can be used by a routine
841 to wait for completion */ 627 to wait for completion */
842 volatile u32 status; /* status of this request */ 628 volatile u32 status; /* status of this request */
843 u32 fsf_command; /* FSF Command copy */ 629 u32 fsf_command; /* FSF Command copy */
844 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 630 struct fsf_qtcb *qtcb; /* address of associated QTCB */
845 u32 seq_no; /* Sequence number of request */ 631 u32 seq_no; /* Sequence number of request */
846 unsigned long data; /* private data of request */ 632 void *data; /* private data of request */
847 struct timer_list timer; /* used for erp or scsi er */ 633 struct timer_list timer; /* used for erp or scsi er */
848 struct zfcp_erp_action *erp_action; /* used if this request is 634 struct zfcp_erp_action *erp_action; /* used if this request is
849 issued on behalf of erp */ 635 issued on behalf of erp */
@@ -851,10 +637,9 @@ struct zfcp_fsf_req {
851 from emergency pool */ 637 from emergency pool */
852 unsigned long long issued; /* request sent time (STCK) */ 638 unsigned long long issued; /* request sent time (STCK) */
853 struct zfcp_unit *unit; 639 struct zfcp_unit *unit;
640 void (*handler)(struct zfcp_fsf_req *);
854}; 641};
855 642
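With the zfcp_fsf_req_handler_t typedef gone, completion dispatch
becomes a plain indirect call through the new per-request handler
member. A minimal sketch; the helper name is made up:

static void sketch_fsf_req_complete(struct zfcp_fsf_req *req)
{
        if (req->handler)
                req->handler(req);      /* per-request completion handler */
        wake_up(&req->completion_wq);
}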
856typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
857
858/* driver data */ 643/* driver data */
859struct zfcp_data { 644struct zfcp_data {
860 struct scsi_host_template scsi_host_template; 645 struct scsi_host_template scsi_host_template;
@@ -873,29 +658,11 @@ struct zfcp_data {
873 char init_busid[BUS_ID_SIZE]; 658 char init_busid[BUS_ID_SIZE];
874 wwn_t init_wwpn; 659 wwn_t init_wwpn;
875 fcp_lun_t init_fcp_lun; 660 fcp_lun_t init_fcp_lun;
876 char *driver_version;
877 struct kmem_cache *fsf_req_qtcb_cache; 661 struct kmem_cache *fsf_req_qtcb_cache;
878 struct kmem_cache *sr_buffer_cache; 662 struct kmem_cache *sr_buffer_cache;
879 struct kmem_cache *gid_pn_cache; 663 struct kmem_cache *gid_pn_cache;
880}; 664};
881 665
882/**
883 * struct zfcp_sg_list - struct describing a scatter-gather list
884 * @sg: pointer to array of (struct scatterlist)
885 * @count: number of elements in scatter-gather list
886 */
887struct zfcp_sg_list {
888 struct scatterlist *sg;
889 unsigned int count;
890};
891
892/* number of elements for various memory pools */
893#define ZFCP_POOL_FSF_REQ_ERP_NR 1
894#define ZFCP_POOL_FSF_REQ_SCSI_NR 1
895#define ZFCP_POOL_FSF_REQ_ABORT_NR 1
896#define ZFCP_POOL_STATUS_READ_NR ZFCP_STATUS_READS_RECOM
897#define ZFCP_POOL_DATA_GID_PN_NR 1
898
899/* struct used by memory pools for fsf_requests */ 666/* struct used by memory pools for fsf_requests */
900struct zfcp_fsf_req_qtcb { 667struct zfcp_fsf_req_qtcb {
901 struct zfcp_fsf_req fsf_req; 668 struct zfcp_fsf_req fsf_req;
@@ -905,7 +672,6 @@ struct zfcp_fsf_req_qtcb {
905/********************** ZFCP SPECIFIC DEFINES ********************************/ 672/********************** ZFCP SPECIFIC DEFINES ********************************/
906 673
907#define ZFCP_REQ_AUTO_CLEANUP 0x00000002 674#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
908#define ZFCP_WAIT_FOR_SBAL 0x00000004
909#define ZFCP_REQ_NO_QTCB 0x00000008 675#define ZFCP_REQ_NO_QTCB 0x00000008
910 676
911#define ZFCP_SET 0x00000100 677#define ZFCP_SET 0x00000100
@@ -916,12 +682,6 @@ struct zfcp_fsf_req_qtcb {
916 ((atomic_read(target) & mask) == mask) 682 ((atomic_read(target) & mask) == mask)
917#endif 683#endif
918 684
919extern void _zfcp_hex_dump(char *, int);
920#define ZFCP_HEX_DUMP(level, addr, count) \
921 if (ZFCP_LOG_CHECK(level)) { \
922 _zfcp_hex_dump(addr, count); \
923 }
924
925#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id) 685#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
926#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter)) 686#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
927#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port)) 687#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
@@ -934,15 +694,6 @@ static inline int zfcp_reqlist_hash(unsigned long req_id)
934 return req_id % REQUEST_LIST_SIZE; 694 return req_id % REQUEST_LIST_SIZE;
935} 695}
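Lookups use the same hash to pick a bucket and then walk it. A sketch
of a find helper built on zfcp_reqlist_hash() and the req_id/list
members seen above; the function itself is hypothetical, shown only to
illustrate the bucket scheme:

static inline struct zfcp_fsf_req *
sketch_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
{
        unsigned int idx = zfcp_reqlist_hash(req_id);
        struct zfcp_fsf_req *req;

        list_for_each_entry(req, &adapter->req_list[idx], list)
                if (req->req_id == req_id)
                        return req;
        return NULL;
}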
936 696
937static inline void zfcp_reqlist_add(struct zfcp_adapter *adapter,
938 struct zfcp_fsf_req *fsf_req)
939{
940 unsigned int idx;
941
942 idx = zfcp_reqlist_hash(fsf_req->req_id);
943 list_add_tail(&fsf_req->list, &adapter->req_list[idx]);
944}
945
946static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter, 697static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
947 struct zfcp_fsf_req *fsf_req) 698 struct zfcp_fsf_req *fsf_req)
948{ 699{
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 805484658dd9..643ac4bba5b5 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1,641 +1,406 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Error Recovery Procedures (ERP).
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
23
24#include "zfcp_ext.h" 9#include "zfcp_ext.h"
25 10
26static int zfcp_erp_adisc(struct zfcp_port *); 11#define ZFCP_MAX_ERPS 3
27static void zfcp_erp_adisc_handler(unsigned long);
28
29static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int, u8,
30 void *);
31static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int, u8,
32 void *);
33static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int, u8, void *);
34static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int, u8, void *);
35
36static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int, u8,
37 void *);
38static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int, u8,
39 void *);
40
41static void zfcp_erp_adapter_block(struct zfcp_adapter *, int);
42static void zfcp_erp_adapter_unblock(struct zfcp_adapter *);
43static void zfcp_erp_port_block(struct zfcp_port *, int);
44static void zfcp_erp_port_unblock(struct zfcp_port *);
45static void zfcp_erp_unit_block(struct zfcp_unit *, int);
46static void zfcp_erp_unit_unblock(struct zfcp_unit *);
47
48static int zfcp_erp_thread(void *);
49
50static int zfcp_erp_strategy(struct zfcp_erp_action *);
51
52static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *);
53static int zfcp_erp_strategy_memwait(struct zfcp_erp_action *);
54static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *, int);
55static int zfcp_erp_strategy_check_unit(struct zfcp_unit *, int);
56static int zfcp_erp_strategy_check_port(struct zfcp_port *, int);
57static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int);
58static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *,
59 struct zfcp_port *,
60 struct zfcp_unit *, int);
61static int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
62static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *,
63 struct zfcp_port *,
64 struct zfcp_unit *, int);
65static int zfcp_erp_strategy_check_queues(struct zfcp_adapter *);
66static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
67
68static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
69static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
70static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
71static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
72static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
73static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
74static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
75static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *);
76static int zfcp_erp_adapter_strategy_open_fsf_statusread(
77 struct zfcp_erp_action *);
78
79static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *);
80static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *);
81
82static int zfcp_erp_port_strategy(struct zfcp_erp_action *);
83static int zfcp_erp_port_strategy_clearstati(struct zfcp_port *);
84static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *);
85static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *);
86static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *);
87static int zfcp_erp_port_strategy_open_nameserver_wakeup(
88 struct zfcp_erp_action *);
89static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *);
90static int zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *);
91static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *);
92
93static int zfcp_erp_unit_strategy(struct zfcp_erp_action *);
94static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
95static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
96static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
97
98static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
99static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
100static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
101static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
102
103static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
104 struct zfcp_port *, struct zfcp_unit *,
105 u8 id, void *ref);
106static int zfcp_erp_action_dequeue(struct zfcp_erp_action *);
107static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
108 struct zfcp_port *, struct zfcp_unit *,
109 int);
110
111static void zfcp_erp_action_ready(struct zfcp_erp_action *);
112static int zfcp_erp_action_exists(struct zfcp_erp_action *);
113
114static void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
115static void zfcp_erp_action_to_running(struct zfcp_erp_action *);
116
117static void zfcp_erp_memwait_handler(unsigned long);
118 12
119/** 13enum zfcp_erp_act_flags {
120 * zfcp_close_qdio - close qdio queues for an adapter 14 ZFCP_STATUS_ERP_TIMEDOUT = 0x10000000,
121 */ 15 ZFCP_STATUS_ERP_CLOSE_ONLY = 0x01000000,
122static void zfcp_close_qdio(struct zfcp_adapter *adapter) 16 ZFCP_STATUS_ERP_DISMISSING = 0x00100000,
123{ 17 ZFCP_STATUS_ERP_DISMISSED = 0x00200000,
124 struct zfcp_qdio_queue *req_queue; 18 ZFCP_STATUS_ERP_LOWMEM = 0x00400000,
125 int first, count; 19};
126 20
127 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 21enum zfcp_erp_steps {
128 return; 22 ZFCP_ERP_STEP_UNINITIALIZED = 0x0000,
23 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
24 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
25 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
26 ZFCP_ERP_STEP_NAMESERVER_OPEN = 0x0200,
27 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
28 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
29 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
30 ZFCP_ERP_STEP_UNIT_OPENING = 0x2000,
31};
129 32
130 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 33enum zfcp_erp_act_type {
131 req_queue = &adapter->request_queue; 34 ZFCP_ERP_ACTION_REOPEN_UNIT = 1,
132 write_lock_irq(&req_queue->queue_lock); 35 ZFCP_ERP_ACTION_REOPEN_PORT = 2,
133 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 36 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
134 write_unlock_irq(&req_queue->queue_lock); 37 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
135 38};
136 while (qdio_shutdown(adapter->ccw_device, 39
137 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) 40enum zfcp_erp_act_state {
138 ssleep(1); 41 ZFCP_ERP_ACTION_RUNNING = 1,
139 42 ZFCP_ERP_ACTION_READY = 2,
140 /* cleanup used outbound sbals */ 43};
141 count = atomic_read(&req_queue->free_count); 44
142 if (count < QDIO_MAX_BUFFERS_PER_Q) { 45enum zfcp_erp_act_result {
143 first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q; 46 ZFCP_ERP_SUCCEEDED = 0,
144 count = QDIO_MAX_BUFFERS_PER_Q - count; 47 ZFCP_ERP_FAILED = 1,
145 zfcp_qdio_zero_sbals(req_queue->buffer, first, count); 48 ZFCP_ERP_CONTINUES = 2,
146 } 49 ZFCP_ERP_EXIT = 3,
147 req_queue->free_index = 0; 50 ZFCP_ERP_DISMISSED = 4,
148 atomic_set(&req_queue->free_count, 0); 51 ZFCP_ERP_NOMEM = 5,
149 req_queue->distance_from_int = 0; 52};
150 adapter->response_queue.free_index = 0; 53
151 atomic_set(&adapter->response_queue.free_count, 0); 54static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
55{
56 zfcp_erp_modify_adapter_status(adapter, 15, NULL,
57 ZFCP_STATUS_COMMON_UNBLOCKED | mask,
58 ZFCP_CLEAR);
152} 59}
153 60
154/** 61static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
155 * zfcp_close_fsf - stop FSF operations for an adapter
156 *
 157 * Dismiss and clean up all pending fsf_reqs (this wakes up all initiators of
 158 * requests waiting for completion; in particular, this returns SCSI commands
159 * with error state).
160 */
161static void zfcp_close_fsf(struct zfcp_adapter *adapter)
162{ 62{
163 /* close queues to ensure that buffers are not accessed by adapter */ 63 struct zfcp_erp_action *curr_act;
164 zfcp_close_qdio(adapter); 64
165 zfcp_fsf_req_dismiss_all(adapter); 65 list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
166 /* reset FSF request sequence number */ 66 if (act == curr_act)
167 adapter->fsf_req_seq_no = 0; 67 return ZFCP_ERP_ACTION_RUNNING;
168 /* all ports and units are closed */ 68 return 0;
169 zfcp_erp_modify_adapter_status(adapter, 24, NULL,
170 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
171} 69}
172 70
173/** 71static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
174 * zfcp_fsf_request_timeout_handler - called if a request timed out
175 * @data: pointer to adapter for handler function
176 *
177 * This function needs to be called if requests (ELS, Generic Service,
178 * or SCSI commands) exceed a certain time limit. The assumption is
179 * that after the time limit the adapter get stuck. So we trigger a reopen of
180 * the adapter.
181 */
182static void zfcp_fsf_request_timeout_handler(unsigned long data)
183{ 72{
184 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 73 struct zfcp_adapter *adapter = act->adapter;
185 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62, 74
186 NULL); 75 list_move(&act->list, &act->adapter->erp_ready_head);
76 zfcp_rec_dbf_event_action(146, act);
77 up(&adapter->erp_ready_sem);
78 zfcp_rec_dbf_event_thread(2, adapter);
187} 79}
188 80
189void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) 81static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
190{ 82{
191 fsf_req->timer.function = zfcp_fsf_request_timeout_handler; 83 act->status |= ZFCP_STATUS_ERP_DISMISSED;
192 fsf_req->timer.data = (unsigned long) fsf_req->adapter; 84 if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING)
193 fsf_req->timer.expires = jiffies + timeout; 85 zfcp_erp_action_ready(act);
194 add_timer(&fsf_req->timer);
195} 86}
196 87
197/* 88static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
198 * function:
199 *
200 * purpose: called if an adapter failed,
201 * initiates adapter recovery which is done
202 * asynchronously
203 *
204 * returns: 0 - initiated action successfully
205 * <0 - failed to initiate action
206 */
207static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter,
208 int clear_mask, u8 id, void *ref)
209{ 89{
210 int retval; 90 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
91 zfcp_erp_action_dismiss(&unit->erp_action);
92}
211 93
212 ZFCP_LOG_DEBUG("reopen adapter %s\n", 94static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
213 zfcp_get_busid_by_adapter(adapter)); 95{
96 struct zfcp_unit *unit;
214 97
215 zfcp_erp_adapter_block(adapter, clear_mask); 98 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
99 zfcp_erp_action_dismiss(&port->erp_action);
100 else
101 list_for_each_entry(unit, &port->unit_list_head, list)
102 zfcp_erp_action_dismiss_unit(unit);
103}
216 104
217 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) { 105static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
218 ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n", 106{
219 zfcp_get_busid_by_adapter(adapter)); 107 struct zfcp_port *port;
220 /* ensure propagation of failed status to new devices */
221 zfcp_erp_adapter_failed(adapter, 13, NULL);
222 retval = -EIO;
223 goto out;
224 }
225 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
226 adapter, NULL, NULL, id, ref);
227 108
228 out: 109 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
229 return retval; 110 zfcp_erp_action_dismiss(&adapter->erp_action);
111 else
112 list_for_each_entry(port, &adapter->port_list_head, list)
113 zfcp_erp_action_dismiss_port(port);
230} 114}
231 115
232/* 116static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
233 * function: 117 struct zfcp_port *port,
234 * 118 struct zfcp_unit *unit)
 235 * purpose: Wrapper for zfcp_erp_adapter_reopen_internal
236 * used to ensure the correct locking
237 *
238 * returns: 0 - initiated action successfully
239 * <0 - failed to initiate action
240 */
241int zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask,
242 u8 id, void *ref)
243{ 119{
244 int retval; 120 int need = want;
245 unsigned long flags; 121 int u_status, p_status, a_status;
246 122
247 read_lock_irqsave(&zfcp_data.config_lock, flags); 123 switch (want) {
248 write_lock(&adapter->erp_lock); 124 case ZFCP_ERP_ACTION_REOPEN_UNIT:
249 retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask, id, ref); 125 u_status = atomic_read(&unit->status);
250 write_unlock(&adapter->erp_lock); 126 if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE)
251 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 127 return 0;
128 p_status = atomic_read(&port->status);
129 if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
130 p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
131 return 0;
132 if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
133 need = ZFCP_ERP_ACTION_REOPEN_PORT;
134 /* fall through */
135 case ZFCP_ERP_ACTION_REOPEN_PORT:
136 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
137 p_status = atomic_read(&port->status);
138 if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
139 return 0;
140 a_status = atomic_read(&adapter->status);
141 if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
142 a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
143 return 0;
144 if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
145 need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
146 /* fall through */
147 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
148 a_status = atomic_read(&adapter->status);
149 if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
150 return 0;
151 }
252 152
253 return retval; 153 return need;
254} 154}
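The fall-throughs above escalate a request to the smallest action that
can actually run: a unit reopen on a blocked port becomes a port
reopen, and so on up to the adapter. A hedged usage sketch; the wrapper
is hypothetical:

static int sketch_pick_recovery(struct zfcp_adapter *adapter,
                                struct zfcp_port *port,
                                struct zfcp_unit *unit)
{
        int need;

        need = zfcp_erp_required_act(ZFCP_ERP_ACTION_REOPEN_UNIT,
                                     adapter, port, unit);
        /* 0: recovery already in progress, nothing to enqueue;
         * otherwise possibly escalated to REOPEN_PORT/REOPEN_ADAPTER */
        return need;
}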
255 155
256int zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask, 156static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
257 u8 id, void *ref) 157 struct zfcp_adapter *adapter,
158 struct zfcp_port *port,
159 struct zfcp_unit *unit)
258{ 160{
259 int retval; 161 struct zfcp_erp_action *erp_action;
162 u32 status = 0;
260 163
261 retval = zfcp_erp_adapter_reopen(adapter, 164 switch (need) {
262 ZFCP_STATUS_COMMON_RUNNING | 165 case ZFCP_ERP_ACTION_REOPEN_UNIT:
263 ZFCP_STATUS_COMMON_ERP_FAILED | 166 zfcp_unit_get(unit);
264 clear_mask, id, ref); 167 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
168 erp_action = &unit->erp_action;
169 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
170 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
171 break;
265 172
266 return retval; 173 case ZFCP_ERP_ACTION_REOPEN_PORT:
267} 174 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
175 zfcp_port_get(port);
176 zfcp_erp_action_dismiss_port(port);
177 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
178 erp_action = &port->erp_action;
179 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
180 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
181 break;
268 182
269int zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask, u8 id, 183 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
270 void *ref) 184 zfcp_adapter_get(adapter);
271{ 185 zfcp_erp_action_dismiss_adapter(adapter);
272 int retval; 186 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
187 erp_action = &adapter->erp_action;
188 if (!(atomic_read(&adapter->status) &
189 ZFCP_STATUS_COMMON_RUNNING))
190 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
191 break;
273 192
274 retval = zfcp_erp_port_reopen(port, 193 default:
275 ZFCP_STATUS_COMMON_RUNNING | 194 return NULL;
276 ZFCP_STATUS_COMMON_ERP_FAILED | 195 }
277 clear_mask, id, ref);
278 196
279 return retval; 197 memset(erp_action, 0, sizeof(struct zfcp_erp_action));
198 erp_action->adapter = adapter;
199 erp_action->port = port;
200 erp_action->unit = unit;
201 erp_action->action = need;
202 erp_action->status = status;
203
204 return erp_action;
280} 205}
281 206
282int zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask, u8 id, 207static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
283 void *ref) 208 struct zfcp_port *port,
209 struct zfcp_unit *unit, u8 id, void *ref)
284{ 210{
285 int retval; 211 int retval = 1, need;
212 struct zfcp_erp_action *act = NULL;
213
214 if (!(atomic_read(&adapter->status) &
215 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP))
216 return -EIO;
286 217
287 retval = zfcp_erp_unit_reopen(unit, 218 need = zfcp_erp_required_act(want, adapter, port, unit);
288 ZFCP_STATUS_COMMON_RUNNING | 219 if (!need)
289 ZFCP_STATUS_COMMON_ERP_FAILED | 220 goto out;
290 clear_mask, id, ref);
291 221
222 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
223 act = zfcp_erp_setup_act(need, adapter, port, unit);
224 if (!act)
225 goto out;
226 ++adapter->erp_total_count;
227 list_add_tail(&act->list, &adapter->erp_ready_head);
228 up(&adapter->erp_ready_sem);
229 zfcp_rec_dbf_event_thread(1, adapter);
230 retval = 0;
231 out:
232 zfcp_rec_dbf_event_trigger(id, ref, want, need, act,
233 adapter, port, unit);
292 return retval; 234 return retval;
293} 235}
294 236
295 237static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
296/** 238 int clear_mask, u8 id, void *ref)
297 * zfcp_erp_adisc - send ADISC ELS command
298 * @port: port structure
299 */
300static int
301zfcp_erp_adisc(struct zfcp_port *port)
302{ 239{
303 struct zfcp_adapter *adapter = port->adapter; 240 zfcp_erp_adapter_block(adapter, clear_mask);
304 struct zfcp_send_els *send_els;
305 struct zfcp_ls_adisc *adisc;
306 void *address = NULL;
307 int retval = 0;
308
309 send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC);
310 if (send_els == NULL)
311 goto nomem;
312
313 send_els->req = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
314 if (send_els->req == NULL)
315 goto nomem;
316 sg_init_table(send_els->req, 1);
317
318 send_els->resp = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
319 if (send_els->resp == NULL)
320 goto nomem;
321 sg_init_table(send_els->resp, 1);
322
323 address = (void *) get_zeroed_page(GFP_ATOMIC);
324 if (address == NULL)
325 goto nomem;
326
327 zfcp_address_to_sg(address, send_els->req, sizeof(struct zfcp_ls_adisc));
328 address += PAGE_SIZE >> 1;
329 zfcp_address_to_sg(address, send_els->resp, sizeof(struct zfcp_ls_adisc_acc));
330 send_els->req_count = send_els->resp_count = 1;
331
332 send_els->adapter = adapter;
333 send_els->port = port;
334 send_els->d_id = port->d_id;
335 send_els->handler = zfcp_erp_adisc_handler;
336 send_els->handler_data = (unsigned long) send_els;
337
338 adisc = zfcp_sg_to_address(send_els->req);
339 send_els->ls_code = adisc->code = ZFCP_LS_ADISC;
340
341 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
342 without FC-AL-2 capability, so we don't set it */
343 adisc->wwpn = fc_host_port_name(adapter->scsi_host);
344 adisc->wwnn = fc_host_node_name(adapter->scsi_host);
345 adisc->nport_id = fc_host_port_id(adapter->scsi_host);
346 ZFCP_LOG_INFO("ADISC request from s_id 0x%06x to d_id 0x%06x "
347 "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
348 "hard_nport_id=0x%06x, nport_id=0x%06x)\n",
349 adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
350 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
351 adisc->nport_id);
352
353 retval = zfcp_fsf_send_els(send_els);
354 if (retval != 0) {
355 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
356 "0x%06x on adapter %s\n", send_els->d_id,
357 zfcp_get_busid_by_adapter(adapter));
358 goto freemem;
359 }
360 241
361 goto out; 242 /* ensure propagation of failed status to new devices */
362 243 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
363 nomem: 244 zfcp_erp_adapter_failed(adapter, 13, NULL);
364 retval = -ENOMEM; 245 return -EIO;
365 freemem:
366 if (address != NULL)
367 __free_pages(sg_page(send_els->req), 0);
368 if (send_els != NULL) {
369 kfree(send_els->req);
370 kfree(send_els->resp);
371 kfree(send_els);
372 } 246 }
373 out: 247 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
374 return retval; 248 adapter, NULL, NULL, id, ref);
375} 249}
376 250
377
378/** 251/**
379 * zfcp_erp_adisc_handler - handler for ADISC ELS command 252 * zfcp_erp_adapter_reopen - Reopen adapter.
380 * @data: pointer to struct zfcp_send_els 253 * @adapter: Adapter to reopen.
381 * 254 * @clear: Status flags to clear.
382 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. 255 * @id: Id for debug trace event.
256 * @ref: Reference for debug trace event.
383 */ 257 */
384static void 258void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
385zfcp_erp_adisc_handler(unsigned long data) 259 u8 id, void *ref)
386{ 260{
387 struct zfcp_send_els *send_els; 261 unsigned long flags;
388 struct zfcp_port *port;
389 struct zfcp_adapter *adapter;
390 u32 d_id;
391 struct zfcp_ls_adisc_acc *adisc;
392
393 send_els = (struct zfcp_send_els *) data;
394 adapter = send_els->adapter;
395 port = send_els->port;
396 d_id = send_els->d_id;
397
398 /* request rejected or timed out */
399 if (send_els->status != 0) {
400 ZFCP_LOG_NORMAL("ELS request rejected/timed out, "
401 "force physical port reopen "
402 "(adapter %s, port d_id=0x%06x)\n",
403 zfcp_get_busid_by_adapter(adapter), d_id);
404 if (zfcp_erp_port_forced_reopen(port, 0, 63, NULL))
405 ZFCP_LOG_NORMAL("failed reopen of port "
406 "(adapter %s, wwpn=0x%016Lx)\n",
407 zfcp_get_busid_by_port(port),
408 port->wwpn);
409 goto out;
410 }
411
412 adisc = zfcp_sg_to_address(send_els->resp);
413
414 ZFCP_LOG_INFO("ADISC response from d_id 0x%06x to s_id "
415 "0x%06x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
416 "hard_nport_id=0x%06x, nport_id=0x%06x)\n",
417 d_id, fc_host_port_id(adapter->scsi_host),
418 (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
419 adisc->hard_nport_id, adisc->nport_id);
420
421 /* set wwnn for port */
422 if (port->wwnn == 0)
423 port->wwnn = adisc->wwnn;
424
425 if (port->wwpn != adisc->wwpn) {
426 ZFCP_LOG_NORMAL("d_id assignment changed, reopening "
427 "port (adapter %s, wwpn=0x%016Lx, "
428 "adisc_resp_wwpn=0x%016Lx)\n",
429 zfcp_get_busid_by_port(port),
430 port->wwpn, (wwn_t) adisc->wwpn);
431 if (zfcp_erp_port_reopen(port, 0, 64, NULL))
432 ZFCP_LOG_NORMAL("failed reopen of port "
433 "(adapter %s, wwpn=0x%016Lx)\n",
434 zfcp_get_busid_by_port(port),
435 port->wwpn);
436 }
437 262
438 out: 263 read_lock_irqsave(&zfcp_data.config_lock, flags);
439 zfcp_port_put(port); 264 write_lock(&adapter->erp_lock);
440 __free_pages(sg_page(send_els->req), 0); 265 _zfcp_erp_adapter_reopen(adapter, clear, id, ref);
441 kfree(send_els->req); 266 write_unlock(&adapter->erp_lock);
442 kfree(send_els->resp); 267 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
443 kfree(send_els);
444} 268}
445 269
446
447/** 270/**
448 * zfcp_test_link - lightweight link test procedure 271 * zfcp_erp_adapter_shutdown - Shutdown adapter.
449 * @port: port to be tested 272 * @adapter: Adapter to shut down.
450 * 273 * @clear: Status flags to clear.
451 * Test status of a link to a remote port using the ELS command ADISC. 274 * @id: Id for debug trace event.
275 * @ref: Reference for debug trace event.
452 */ 276 */
453int 277void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
454zfcp_test_link(struct zfcp_port *port) 278 u8 id, void *ref)
455{ 279{
456 int retval; 280 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
457 281 zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
458 zfcp_port_get(port);
459 retval = zfcp_erp_adisc(port);
460 if (retval != 0 && retval != -EBUSY) {
461 zfcp_port_put(port);
462 ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx "
463 "on adapter %s\n ", port->wwpn,
464 zfcp_get_busid_by_port(port));
465 retval = zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
466 if (retval != 0) {
467 ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx "
468 "on adapter %s failed\n", port->wwpn,
469 zfcp_get_busid_by_port(port));
470 retval = -EPERM;
471 }
472 }
473
474 return retval;
475} 282}
476 283
477 284/**
478/* 285 * zfcp_erp_port_shutdown - Shutdown port
479 * function: 286 * @port: Port to shut down.
480 * 287 * @clear: Status flags to clear.
481 * purpose: called if a port failed to be opened normally 288 * @id: Id for debug trace event.
482 * initiates Forced Reopen recovery which is done 289 * @ref: Reference for debug trace event.
483 * asynchronously
484 *
485 * returns: 0 - initiated action successfully
486 * <0 - failed to initiate action
487 */ 290 */
488static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, 291void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, u8 id, void *ref)
489 int clear_mask, u8 id,
490 void *ref)
491{ 292{
492 int retval; 293 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
294 zfcp_erp_port_reopen(port, clear | flags, id, ref);
295}
493 296
494 ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n", 297/**
495 port->wwpn, zfcp_get_busid_by_port(port)); 298 * zfcp_erp_unit_shutdown - Shutdown unit
299 * @unit: Unit to shut down.
300 * @clear: Status flags to clear.
301 * @id: Id for debug trace event.
302 * @ref: Reference for debug trace event.
303 */
304void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, u8 id, void *ref)
305{
306 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
307 zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
308}
496 309
497 zfcp_erp_port_block(port, clear_mask); 310static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
311{
312 zfcp_erp_modify_port_status(port, 17, NULL,
313 ZFCP_STATUS_COMMON_UNBLOCKED | clear,
314 ZFCP_CLEAR);
315}
498 316
499 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) { 317static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
500 ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx " 318 int clear, u8 id, void *ref)
501 "on adapter %s\n", port->wwpn, 319{
502 zfcp_get_busid_by_port(port)); 320 zfcp_erp_port_block(port, clear);
503 retval = -EIO;
504 goto out;
505 }
506 321
507 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 322 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
508 port->adapter, port, NULL, id, ref); 323 return;
509 324
510 out: 325 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
511 return retval; 326 port->adapter, port, NULL, id, ref);
512} 327}
513 328
514/* 329/**
515 * function: 330 * zfcp_erp_port_forced_reopen - Forced close of port and open again
516 * 331 * @port: Port to force close and to reopen.
 517 * purpose: Wrapper for zfcp_erp_port_forced_reopen_internal 332 * @id: Id for debug trace event.
518 * used to ensure the correct locking 333 * @ref: Reference for debug trace event.
519 *
520 * returns: 0 - initiated action successfully
521 * <0 - failed to initiate action
522 */ 334 */
523int zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask, u8 id, 335void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, u8 id,
524 void *ref) 336 void *ref)
525{ 337{
526 int retval;
527 unsigned long flags; 338 unsigned long flags;
528 struct zfcp_adapter *adapter; 339 struct zfcp_adapter *adapter = port->adapter;
529 340
530 adapter = port->adapter;
531 read_lock_irqsave(&zfcp_data.config_lock, flags); 341 read_lock_irqsave(&zfcp_data.config_lock, flags);
532 write_lock(&adapter->erp_lock); 342 write_lock(&adapter->erp_lock);
533 retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask, id, 343 _zfcp_erp_port_forced_reopen(port, clear, id, ref);
534 ref);
535 write_unlock(&adapter->erp_lock); 344 write_unlock(&adapter->erp_lock);
536 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 345 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
537
538 return retval;
539} 346}
540 347
541/* 348static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id,
542 * function: 349 void *ref)
543 *
544 * purpose: called if a port is to be opened
545 * initiates Reopen recovery which is done
546 * asynchronously
547 *
548 * returns: 0 - initiated action successfully
549 * <0 - failed to initiate action
550 */
551static int zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask,
552 u8 id, void *ref)
553{ 350{
554 int retval; 351 zfcp_erp_port_block(port, clear);
555
556 ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n",
557 port->wwpn, zfcp_get_busid_by_port(port));
558 352
559 zfcp_erp_port_block(port, clear_mask); 353 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
560
561 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
562 ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx "
563 "on adapter %s\n", port->wwpn,
564 zfcp_get_busid_by_port(port));
565 /* ensure propagation of failed status to new devices */ 354 /* ensure propagation of failed status to new devices */
566 zfcp_erp_port_failed(port, 14, NULL); 355 zfcp_erp_port_failed(port, 14, NULL);
567 retval = -EIO; 356 return -EIO;
568 goto out;
569 } 357 }
570 358
571 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, 359 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
572 port->adapter, port, NULL, id, ref); 360 port->adapter, port, NULL, id, ref);
573
574 out:
575 return retval;
576} 361}
577 362
578/** 363/**
579 * zfcp_erp_port_reopen - initiate reopen of a remote port 364 * zfcp_erp_port_reopen - trigger remote port recovery
580 * @port: port to be reopened 365 * @port: port to recover
581 * @clear_mask: specifies flags in port status to be cleared 366 * @clear_mask: flags in port status to be cleared
582 * Return: 0 on success, < 0 on error
583 * 367 *
 584 * This is a wrapper function for zfcp_erp_port_reopen_internal. It ensures 368 * Returns 0 if recovery has been triggered, < 0 if not.
585 * correct locking. An error recovery task is initiated to do the reopen.
586 * To wait for the completion of the reopen zfcp_erp_wait should be used.
587 */ 369 */
588int zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask, u8 id, 370int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, void *ref)
589 void *ref)
590{ 371{
591 int retval;
592 unsigned long flags; 372 unsigned long flags;
373 int retval;
593 struct zfcp_adapter *adapter = port->adapter; 374 struct zfcp_adapter *adapter = port->adapter;
594 375
595 read_lock_irqsave(&zfcp_data.config_lock, flags); 376 read_lock_irqsave(&zfcp_data.config_lock, flags);
596 write_lock(&adapter->erp_lock); 377 write_lock(&adapter->erp_lock);
597 retval = zfcp_erp_port_reopen_internal(port, clear_mask, id, ref); 378 retval = _zfcp_erp_port_reopen(port, clear, id, ref);
598 write_unlock(&adapter->erp_lock); 379 write_unlock(&adapter->erp_lock);
599 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 380 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
600 381
601 return retval; 382 return retval;
602} 383}
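The old kerneldoc noted that callers wanting synchronous behaviour
should combine the reopen trigger with zfcp_erp_wait(). A usage sketch
under that assumption; the trace id 99 is an arbitrary placeholder:

static int sketch_port_recover_sync(struct zfcp_port *port)
{
        int ret = zfcp_erp_port_reopen(port, 0, 99, NULL);

        if (ret)
                return ret;             /* recovery was not triggered */
        zfcp_erp_wait(port->adapter);   /* block until ERP settles */
        return 0;
}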
603 384
604/* 385static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
605 * function:
606 *
607 * purpose: called if a unit is to be opened
608 * initiates Reopen recovery which is done
609 * asynchronously
610 *
611 * returns: 0 - initiated action successfully
612 * <0 - failed to initiate action
613 */
614static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask,
615 u8 id, void *ref)
616{ 386{
617 int retval; 387 zfcp_erp_modify_unit_status(unit, 19, NULL,
618 struct zfcp_adapter *adapter = unit->port->adapter; 388 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
389 ZFCP_CLEAR);
390}
619 391
620 ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx " 392static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id,
621 "on adapter %s\n", unit->fcp_lun, 393 void *ref)
622 unit->port->wwpn, zfcp_get_busid_by_unit(unit)); 394{
395 struct zfcp_adapter *adapter = unit->port->adapter;
623 396
624 zfcp_erp_unit_block(unit, clear_mask); 397 zfcp_erp_unit_block(unit, clear);
625 398
626 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) { 399 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
627 ZFCP_LOG_DEBUG("skipped reopen of failed unit 0x%016Lx " 400 return;
628 "on port 0x%016Lx on adapter %s\n",
629 unit->fcp_lun, unit->port->wwpn,
630 zfcp_get_busid_by_unit(unit));
631 retval = -EIO;
632 goto out;
633 }
634 401
635 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, 402 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
636 adapter, unit->port, unit, id, ref); 403 adapter, unit->port, unit, id, ref);
637 out:
638 return retval;
639} 404}
640 405
641/** 406/**
@@ -643,987 +408,182 @@ static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask,
643 * @unit: unit to be reopened 408 * @unit: unit to be reopened
644 * @clear_mask: specifies flags in unit status to be cleared 409 * @clear_mask: specifies flags in unit status to be cleared
645 * Return: 0 on success, < 0 on error 410 * Return: 0 on success, < 0 on error
646 *
 647 * This is a wrapper for zfcp_erp_unit_reopen_internal. It ensures correct
648 * locking. An error recovery task is initiated to do the reopen.
649 * To wait for the completion of the reopen zfcp_erp_wait should be used.
650 */ 411 */
651int zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask, u8 id, 412void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id, void *ref)
652 void *ref)
653{ 413{
654 int retval;
655 unsigned long flags; 414 unsigned long flags;
656 struct zfcp_adapter *adapter; 415 struct zfcp_port *port = unit->port;
657 struct zfcp_port *port; 416 struct zfcp_adapter *adapter = port->adapter;
658
659 port = unit->port;
660 adapter = port->adapter;
661 417
662 read_lock_irqsave(&zfcp_data.config_lock, flags); 418 read_lock_irqsave(&zfcp_data.config_lock, flags);
663 write_lock(&adapter->erp_lock); 419 write_lock(&adapter->erp_lock);
664 retval = zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref); 420 _zfcp_erp_unit_reopen(unit, clear, id, ref);
665 write_unlock(&adapter->erp_lock); 421 write_unlock(&adapter->erp_lock);
666 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 422 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
667
668 return retval;
669} 423}
670 424
671/** 425static int status_change_set(unsigned long mask, atomic_t *status)
672 * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
673 */
674static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
675{
676 zfcp_erp_modify_adapter_status(adapter, 15, NULL,
677 ZFCP_STATUS_COMMON_UNBLOCKED |
678 clear_mask, ZFCP_CLEAR);
679}
680
681/* FIXME: isn't really atomic */
682/*
683 * returns the mask which has not been set so far, i.e.
684 * 0 if no bit has been changed, !0 if some bit has been changed
685 */
686static int atomic_test_and_set_mask(unsigned long mask, atomic_t *v)
687{ 426{
688 int changed_bits = (atomic_read(v) /*XOR*/^ mask) & mask; 427 return (atomic_read(status) ^ mask) & mask;
689 atomic_set_mask(mask, v);
690 return changed_bits;
691} 428}
692 429
693/* FIXME: isn't really atomic */ 430static int status_change_clear(unsigned long mask, atomic_t *status)
694/*
695 * returns the mask which has not been cleared so far, i.e.
696 * 0 if no bit has been changed, !0 if some bit has been changed
697 */
698static int atomic_test_and_clear_mask(unsigned long mask, atomic_t *v)
699{ 431{
700 int changed_bits = atomic_read(v) & mask; 432 return atomic_read(status) & mask;
701 atomic_clear_mask(mask, v);
702 return changed_bits;
703} 433}
704 434
705/**
706 * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
707 */
708static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) 435static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
709{ 436{
710 if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, 437 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
711 &adapter->status))
712 zfcp_rec_dbf_event_adapter(16, NULL, adapter); 438 zfcp_rec_dbf_event_adapter(16, NULL, adapter);
439 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
713} 440}
714 441
715/* 442static void zfcp_erp_port_unblock(struct zfcp_port *port)
716 * function:
717 *
718 * purpose: disable I/O,
719 * return any open requests and clean them up,
720 * aim: no pending and incoming I/O
721 *
722 * returns:
723 */
724static void
725zfcp_erp_port_block(struct zfcp_port *port, int clear_mask)
726{
727 zfcp_erp_modify_port_status(port, 17, NULL,
728 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
729 ZFCP_CLEAR);
730}
731
732/*
733 * function:
734 *
735 * purpose: enable I/O
736 *
737 * returns:
738 */
739static void
740zfcp_erp_port_unblock(struct zfcp_port *port)
741{ 443{
742 if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, 444 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
743 &port->status))
744 zfcp_rec_dbf_event_port(18, NULL, port); 445 zfcp_rec_dbf_event_port(18, NULL, port);
446 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
745} 447}
746 448
747/* 449static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
748 * function:
749 *
750 * purpose: disable I/O,
751 * return any open requests and clean them up,
752 * aim: no pending and incoming I/O
753 *
754 * returns:
755 */
756static void
757zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
758{
759 zfcp_erp_modify_unit_status(unit, 19, NULL,
760 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
761 ZFCP_CLEAR);
762}
763
764/*
765 * function:
766 *
767 * purpose: enable I/O
768 *
769 * returns:
770 */
771static void
772zfcp_erp_unit_unblock(struct zfcp_unit *unit)
773{ 450{
774 if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, 451 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
775 &unit->status))
776 zfcp_rec_dbf_event_unit(20, NULL, unit); 452 zfcp_rec_dbf_event_unit(20, NULL, unit);
453 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
777} 454}
778 455
779static void 456static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
780zfcp_erp_action_ready(struct zfcp_erp_action *erp_action)
781{ 457{
782 struct zfcp_adapter *adapter = erp_action->adapter; 458 list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
783 459 zfcp_rec_dbf_event_action(145, erp_action);
784 zfcp_erp_action_to_ready(erp_action);
785 up(&adapter->erp_ready_sem);
786 zfcp_rec_dbf_event_thread(2, adapter, 0);
787} 460}
788 461
789/* 462static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
790 * function:
791 *
792 * purpose:
793 *
794 * returns: <0 erp_action not found in any list
795 * ZFCP_ERP_ACTION_READY erp_action is in ready list
796 * ZFCP_ERP_ACTION_RUNNING erp_action is in running list
797 *
798 * locks: erp_lock must be held
799 */
800static int
801zfcp_erp_action_exists(struct zfcp_erp_action *erp_action)
802{ 463{
803 int retval = -EINVAL; 464 struct zfcp_adapter *adapter = act->adapter;
804 struct list_head *entry;
805 struct zfcp_erp_action *entry_erp_action;
806 struct zfcp_adapter *adapter = erp_action->adapter;
807
808 /* search in running list */
809 list_for_each(entry, &adapter->erp_running_head) {
810 entry_erp_action =
811 list_entry(entry, struct zfcp_erp_action, list);
812 if (entry_erp_action == erp_action) {
813 retval = ZFCP_ERP_ACTION_RUNNING;
814 goto out;
815 }
816 }
817 /* search in ready list */
818 list_for_each(entry, &adapter->erp_ready_head) {
819 entry_erp_action =
820 list_entry(entry, struct zfcp_erp_action, list);
821 if (entry_erp_action == erp_action) {
822 retval = ZFCP_ERP_ACTION_READY;
823 goto out;
824 }
825 }
826 465
827 out: 466 if (!act->fsf_req)
828 return retval; 467 return;
829}
830
831/*
832 * purpose: checks current status of action (timed out, dismissed, ...)
833 * and does appropriate preparations (dismiss fsf request, ...)
834 *
835 * locks: called under erp_lock (disabled interrupts)
836 */
837static void
838zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
839{
840 struct zfcp_adapter *adapter = erp_action->adapter;
841 468
842 if (erp_action->fsf_req) { 469 spin_lock(&adapter->req_list_lock);
843 /* take lock to ensure that request is not deleted meanwhile */ 470 if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
844 spin_lock(&adapter->req_list_lock); 471 act->fsf_req->erp_action == act) {
845 if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) && 472 if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
846 erp_action->fsf_req->erp_action == erp_action) { 473 ZFCP_STATUS_ERP_TIMEDOUT)) {
847 /* fsf_req still exists */ 474 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
848 /* dismiss fsf_req of timed out/dismissed erp_action */ 475 zfcp_rec_dbf_event_action(142, act);
849 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
850 ZFCP_STATUS_ERP_TIMEDOUT)) {
851 erp_action->fsf_req->status |=
852 ZFCP_STATUS_FSFREQ_DISMISSED;
853 zfcp_rec_dbf_event_action(142, erp_action);
854 }
855 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
856 zfcp_rec_dbf_event_action(143, erp_action);
857 ZFCP_LOG_NORMAL("error: erp step timed out "
858 "(action=%d, fsf_req=%p)\n ",
859 erp_action->action,
860 erp_action->fsf_req);
861 }
862 /*
863 * If fsf_req is neither dismissed nor completed
864 * then keep it running asynchronously and don't mess
865 * with the association of erp_action and fsf_req.
866 */
867 if (erp_action->fsf_req->status &
868 (ZFCP_STATUS_FSFREQ_COMPLETED |
869 ZFCP_STATUS_FSFREQ_DISMISSED)) {
870 /* forget about association between fsf_req
871 and erp_action */
872 erp_action->fsf_req = NULL;
873 }
874 } else {
875 /*
876 * even if this fsf_req has gone, forget about
877 * association between erp_action and fsf_req
878 */
879 erp_action->fsf_req = NULL;
880 } 476 }
881 spin_unlock(&adapter->req_list_lock); 477 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
882 } 478 zfcp_rec_dbf_event_action(143, act);
479 if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
480 ZFCP_STATUS_FSFREQ_DISMISSED))
481 act->fsf_req = NULL;
482 } else
483 act->fsf_req = NULL;
484 spin_unlock(&adapter->req_list_lock);
883} 485}
884 486
885/** 487/**
886 * zfcp_erp_async_handler_nolock - complete erp_action 488 * zfcp_erp_notify - Trigger ERP action.
887 * 489 * @erp_action: ERP action to continue.
888 * Used for normal completion, time-out, dismissal and failure after 490 * @set_mask: ERP action status flags to set.
889 * low memory condition.
890 */ 491 */
891static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, 492void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
892 unsigned long set_mask)
893{
894 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
895 erp_action->status |= set_mask;
896 zfcp_erp_action_ready(erp_action);
897 } else {
898 /* action is ready or gone - nothing to do */
899 }
900}
901
902/**
903 * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
904 */
905void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
906 unsigned long set_mask)
907{ 493{
908 struct zfcp_adapter *adapter = erp_action->adapter; 494 struct zfcp_adapter *adapter = erp_action->adapter;
909 unsigned long flags; 495 unsigned long flags;
910 496
911 write_lock_irqsave(&adapter->erp_lock, flags); 497 write_lock_irqsave(&adapter->erp_lock, flags);
912 zfcp_erp_async_handler_nolock(erp_action, set_mask); 498 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
913 write_unlock_irqrestore(&adapter->erp_lock, flags); 499 erp_action->status |= set_mask;
914}
915
916/*
 917 * purpose: is called for an erp_action that slept waiting for
 918 * memory to become available;
 919 * triggers continuation of this action
920 */
921static void
922zfcp_erp_memwait_handler(unsigned long data)
923{
924 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
925
926 zfcp_erp_async_handler(erp_action, 0);
927}
928
929/*
930 * purpose: is called if an asynchronous erp step timed out,
931 * action gets an appropriate flag and will be processed
932 * accordingly
933 */
934static void zfcp_erp_timeout_handler(unsigned long data)
935{
936 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
937
938 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
939}
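Arming that timeout uses the per-action timer embedded in struct
zfcp_erp_action. A sketch with the timer API of this kernel
generation; the helper and the timeout value are illustrative only:

static void sketch_erp_start_timer(struct zfcp_erp_action *act,
                                   unsigned long timeout)
{
        init_timer(&act->timer);
        act->timer.function = zfcp_erp_timeout_handler;
        act->timer.data = (unsigned long) act;
        act->timer.expires = jiffies + timeout;
        add_timer(&act->timer);
}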
940
941/**
942 * zfcp_erp_action_dismiss - dismiss an erp_action
943 *
944 * adapter->erp_lock must be held
945 *
946 * Dismissal of an erp_action is usually required if an erp_action of
947 * higher priority is generated.
948 */
949static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
950{
951 erp_action->status |= ZFCP_STATUS_ERP_DISMISSED;
952 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING)
953 zfcp_erp_action_ready(erp_action); 500 zfcp_erp_action_ready(erp_action);
954}
955
956int
957zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
958{
959 int retval = 0;
960
961 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
962
963 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
964 if (retval < 0) {
965 ZFCP_LOG_NORMAL("error: creation of erp thread failed for "
966 "adapter %s\n",
967 zfcp_get_busid_by_adapter(adapter));
968 } else {
969 wait_event(adapter->erp_thread_wqh,
970 atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
971 &adapter->status));
972 } 501 }
973 502 write_unlock_irqrestore(&adapter->erp_lock, flags);
974 return (retval < 0);
975}
976
977/*
978 * function:
979 *
980 * purpose:
981 *
982 * returns:
983 *
984 * context: process (i.e. proc-fs or rmmod/insmod)
985 *
986 * note: The caller of this routine ensures that the specified
987 * adapter has been shut down and that this operation
988 * has been completed. Thus, there are no pending erp_actions
989 * which would need to be handled here.
990 */
991int
992zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
993{
994 int retval = 0;
995
996 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
997 up(&adapter->erp_ready_sem);
998 zfcp_rec_dbf_event_thread(2, adapter, 1);
999
1000 wait_event(adapter->erp_thread_wqh,
1001 !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
1002 &adapter->status));
1003
1004 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1005 &adapter->status);
1006
1007 return retval;
1008}
1009
1010/*
1011 * purpose: is run as a kernel thread,
1012 * goes through list of error recovery actions of associated adapter
1013 * and delegates single action to execution
1014 *
1015 * returns: 0
1016 */
1017static int
1018zfcp_erp_thread(void *data)
1019{
1020 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
1021 struct list_head *next;
1022 struct zfcp_erp_action *erp_action;
1023 unsigned long flags;
1024
1025 daemonize("zfcperp%s", zfcp_get_busid_by_adapter(adapter));
1026 /* Block all signals */
1027 siginitsetinv(&current->blocked, 0);
1028 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1029 wake_up(&adapter->erp_thread_wqh);
1030
1031 while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1032 &adapter->status)) {
1033
1034 write_lock_irqsave(&adapter->erp_lock, flags);
1035 next = adapter->erp_ready_head.next;
1036 write_unlock_irqrestore(&adapter->erp_lock, flags);
1037
1038 if (next != &adapter->erp_ready_head) {
1039 erp_action =
1040 list_entry(next, struct zfcp_erp_action, list);
1041 /*
1042 * process action (incl. [re]moving it
1043 * from 'ready' queue)
1044 */
1045 zfcp_erp_strategy(erp_action);
1046 }
1047
1048 /*
1049 * sleep as long as there is nothing to do, i.e.
1050 * no action in 'ready' queue to be processed and
1051 * thread is not to be killed
1052 */
1053 zfcp_rec_dbf_event_thread(4, adapter, 1);
1054 down_interruptible(&adapter->erp_ready_sem);
1055 zfcp_rec_dbf_event_thread(5, adapter, 1);
1056 }
1057
1058 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1059 wake_up(&adapter->erp_thread_wqh);
1060
1061 return 0;
1062}
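[Editor's aside] The thread body above is a classic consumer loop: process the head of the ready queue if there is one, then sleep on a semaphore until either new work or the kill flag arrives. A compact userspace model of that loop (hedged sketch; the names are invented for illustration):

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static sem_t ready_sem;          /* models adapter->erp_ready_sem */
	static atomic_int kill_flag;     /* models ...ERP_THREAD_KILL */
	static atomic_int pending;       /* stand-in for the erp_ready list */

	static void *erp_thread(void *unused)
	{
		while (!atomic_load(&kill_flag)) {
			if (atomic_load(&pending) > 0) { /* run one strategy */
				atomic_fetch_sub(&pending, 1);
				puts("processed one erp_action");
			}
			sem_wait(&ready_sem);    /* sleep until work or kill */
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;
		sem_init(&ready_sem, 0, 0);
		pthread_create(&tid, NULL, erp_thread, NULL);
		atomic_store(&pending, 1);
		sem_post(&ready_sem);            /* enqueue + wake, like action_ready() */
		atomic_store(&kill_flag, 1);
		sem_post(&ready_sem);            /* thread_kill(): flag + final up() */
		pthread_join(tid, NULL);
		return 0;
	}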
1063
1064/*
1065 * function:
1066 *
1067 * purpose: drives single error recovery action and schedules higher and
1068 * subordinate actions, if necessary
1069 *
1070 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
1071 * ZFCP_ERP_SUCCEEDED - action finished successfully (deqd)
1072 * ZFCP_ERP_FAILED - action finished unsuccessfully (deqd)
1073 * ZFCP_ERP_EXIT - action finished (dequeued), offline
1074 * ZFCP_ERP_DISMISSED - action canceled (dequeued)
1075 */
1076static int
1077zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1078{
1079 int retval = 0;
1080 struct zfcp_adapter *adapter = erp_action->adapter;
1081 struct zfcp_port *port = erp_action->port;
1082 struct zfcp_unit *unit = erp_action->unit;
1083 int action = erp_action->action;
1084 u32 status = erp_action->status;
1085 unsigned long flags;
1086
1087 /* serialise dismissing, timing out, moving, enqueueing */
1088 read_lock_irqsave(&zfcp_data.config_lock, flags);
1089 write_lock(&adapter->erp_lock);
1090
1091 /* dequeue dismissed action and leave, if required */
1092 retval = zfcp_erp_strategy_check_action(erp_action, retval);
1093 if (retval == ZFCP_ERP_DISMISSED) {
1094 goto unlock;
1095 }
1096
1097 /*
1098 * move action to 'running' queue before processing it
1099 * (to avoid a race condition regarding moving the
1100 * action to the 'running' queue and back)
1101 */
1102 zfcp_erp_action_to_running(erp_action);
1103
1104 /*
1105 * try to process action as far as possible,
1106 * no lock to allow for blocking operations (kmalloc, qdio, ...),
1107 * afterwards the lock is required again for the following reasons:
1108 * - dequeueing of finished action and enqueueing of
1109 * follow-up actions must be atomic so that any other
1110 * reopen-routine does not believe there is nothing to do
1111 * and that it is safe to enqueue something else,
1112 * - we want to force any control thread which is dismissing
1113 * actions to finish this before we decide about
1114 * necessary steps to be taken here further
1115 */
1116 write_unlock(&adapter->erp_lock);
1117 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1118 retval = zfcp_erp_strategy_do_action(erp_action);
1119 read_lock_irqsave(&zfcp_data.config_lock, flags);
1120 write_lock(&adapter->erp_lock);
1121
1122 /*
1123 * check for dismissed status again to avoid follow-up actions,
1124 * failing of targets and so on for dismissed actions,
1125 * we go through down() here because there has been an up()
1126 */
1127 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
1128 retval = ZFCP_ERP_CONTINUES;
1129
1130 switch (retval) {
1131 case ZFCP_ERP_NOMEM:
1132 /* no memory to continue immediately, let it sleep */
1133 if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
1134 ++adapter->erp_low_mem_count;
1135 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
1136 }
1137 /* This condition is true if there is no memory available
1138 for any erp_action on this adapter. This implies that there
1139 are no elements in the memory pool(s) left for erp_actions.
1140 This might happen if an erp_action that used a memory pool
1141 element was timed out.
1142 */
1143 if (adapter->erp_total_count == adapter->erp_low_mem_count) {
1144 ZFCP_LOG_NORMAL("error: no mempool elements available, "
1145 "restarting I/O on adapter %s "
1146 "to free mempool\n",
1147 zfcp_get_busid_by_adapter(adapter));
1148 zfcp_erp_adapter_reopen_internal(adapter, 0, 66, NULL);
1149 } else {
1150 retval = zfcp_erp_strategy_memwait(erp_action);
1151 }
1152 goto unlock;
1153 case ZFCP_ERP_CONTINUES:
1154 /* leave since this action runs asynchronously */
1155 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
1156 --adapter->erp_low_mem_count;
1157 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
1158 }
1159 goto unlock;
1160 }
1161 /* ok, finished action (whatever its result is) */
1162
1163 /* check for unrecoverable targets */
1164 retval = zfcp_erp_strategy_check_target(erp_action, retval);
1165
1166 /* action must be dequeued (here to allow for further ones) */
1167 zfcp_erp_action_dequeue(erp_action);
1168
1169 /*
1170 * put this target through the erp mill again if someone has
1171 * requested to change the status of a target being online
1172 * to offline or the other way around
1173 * (old retval is preserved if nothing has to be done here)
1174 */
1175 retval = zfcp_erp_strategy_statechange(action, status, adapter,
1176 port, unit, retval);
1177
1178 /*
1179 * leave if target is in permanent error state or if
1180 * action is repeated in order to process state change
1181 */
1182 if (retval == ZFCP_ERP_EXIT) {
1183 goto unlock;
1184 }
1185
1186 /* trigger follow up actions */
1187 zfcp_erp_strategy_followup_actions(action, adapter, port, unit, retval);
1188
1189 unlock:
1190 write_unlock(&adapter->erp_lock);
1191 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1192
1193 if (retval != ZFCP_ERP_CONTINUES)
1194 zfcp_erp_action_cleanup(action, adapter, port, unit, retval);
1195
1196 /*
1197 * a few tasks remain when the erp queues are empty
1198 * (don't do that if the last action evaluated was dismissed
1199 * since this clearly indicates that there is more to come) :
1200 	 *   - close the name server port if it is still open
1201 * (enqueues another [probably] final action)
1202 * - otherwise, wake up whoever wants to be woken when we are
1203 * done with erp
1204 */
1205 if (retval != ZFCP_ERP_DISMISSED)
1206 zfcp_erp_strategy_check_queues(adapter);
1207
1208 return retval;
1209 }
1210 
503 }
504 
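[Editor's aside] The long comment inside zfcp_erp_strategy() explains the core locking discipline: take the lock to move the action to the running queue, drop it around the blocking strategy step, then retake it and recheck for dismissal so that dequeue and follow-up enqueue stay atomic. A minimal userspace sketch of that drop-lock-around-blocking-work pattern (illustrative names only):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t erp_lock = PTHREAD_MUTEX_INITIALIZER;
	static int dismissed; /* may be set by another thread while we work */

	static int do_blocking_step(void)
	{
		/* anything that may sleep (allocation, I/O) must run unlocked */
		return 0;
	}

	static void strategy(void)
	{
		int ret;

		pthread_mutex_lock(&erp_lock);
		/* move action to the 'running' queue while still locked */
		pthread_mutex_unlock(&erp_lock);

		ret = do_blocking_step(); /* no lock held across the sleep */

		pthread_mutex_lock(&erp_lock);
		if (dismissed)            /* recheck state that may have changed */
			ret = -1;
		/* dequeue + enqueue follow-up atomically under the lock */
		pthread_mutex_unlock(&erp_lock);
		printf("step result %d\n", ret);
	}

	int main(void) { strategy(); return 0; }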
1211 /*
1212  * function:
1213  *
1214  * purpose:
1215  *
1216  * returns:	ZFCP_ERP_DISMISSED	- if action has been dismissed
1217  *		retval			- otherwise
1218  */
1219 static int
1220 zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval)
1221 {
1222 	zfcp_erp_strategy_check_fsfreq(erp_action);
1223 
1224 	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
1225 		zfcp_erp_action_dequeue(erp_action);
1226 		retval = ZFCP_ERP_DISMISSED;
1227 	}
1228 
1229 	return retval;
1230 }
1231 
1232 static int
1233 zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1234 {
1235 	int retval = ZFCP_ERP_FAILED;
505 /**
506  * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
507  * @data: ERP action (from timer data)
508  */
509 void zfcp_erp_timeout_handler(unsigned long data)
510 {
511 	struct zfcp_erp_action *act = (struct zfcp_erp_action *) data;
512 	zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
513 }
514 
515 static void zfcp_erp_memwait_handler(unsigned long data)
516 {
517 	zfcp_erp_notify((struct zfcp_erp_action *)data, 0);
1236
1237 /*
1238 * try to execute/continue action as far as possible,
1239 * note: no lock in subsequent strategy routines
1240 * (this allows these routine to call schedule, e.g.
1241 * kmalloc with such flags or qdio_initialize & friends)
1242 * Note: in case of timeout, the separate strategies will fail
1243 * anyhow. No need for a special action. Even worse, a nameserver
1244 * failure would not wake up waiting ports without the call.
1245 */
1246 switch (erp_action->action) {
1247
1248 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1249 retval = zfcp_erp_adapter_strategy(erp_action);
1250 break;
1251
1252 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1253 retval = zfcp_erp_port_forced_strategy(erp_action);
1254 break;
1255
1256 case ZFCP_ERP_ACTION_REOPEN_PORT:
1257 retval = zfcp_erp_port_strategy(erp_action);
1258 break;
1259
1260 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1261 retval = zfcp_erp_unit_strategy(erp_action);
1262 break;
1263
1264 default:
1265 ZFCP_LOG_NORMAL("bug: unknown erp action requested on "
1266 "adapter %s (action=%d)\n",
1267 zfcp_get_busid_by_adapter(erp_action->adapter),
1268 erp_action->action);
1269 }
1270
1271 return retval;
1272 }
1273 
1274 /*
518 }
519 
520 static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
1275 * function:
1276 *
1277 * purpose: triggers retry of this action after a certain amount of time
1278 * by means of timer provided by erp_action
1279 *
1280 * returns: ZFCP_ERP_CONTINUES - erp_action sleeps in erp running queue
1281 */
1282static int
1283zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
1284 {
1285 	int retval = ZFCP_ERP_CONTINUES;
1286 
1287 	init_timer(&erp_action->timer);
1288 	erp_action->timer.function = zfcp_erp_memwait_handler;
1289 	erp_action->timer.data = (unsigned long) erp_action;
1290 	erp_action->timer.expires = jiffies + ZFCP_ERP_MEMWAIT_TIMEOUT;
1291 	add_timer(&erp_action->timer);
1292 
1293 	return retval;
1294 }
521 {
522 	init_timer(&erp_action->timer);
523 	erp_action->timer.function = zfcp_erp_memwait_handler;
524 	erp_action->timer.data = (unsigned long) erp_action;
525 	erp_action->timer.expires = jiffies + HZ;
526 	add_timer(&erp_action->timer);
527 }
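[Editor's aside] The memwait path arms a one-shot timer so a low-memory action simply retries a second later (the new code hard-codes HZ). A userspace analogue using the POSIX timer API — a sketch of the retry-after-delay idea, not driver code:

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static void memwait_fire(union sigval sv)
	{
		/* plays the role of zfcp_erp_memwait_handler -> zfcp_erp_notify(act, 0) */
		puts("retrying action that waited for memory");
	}

	int main(void)
	{
		timer_t t;
		struct sigevent ev = { 0 };
		struct itimerspec when = { 0 };

		ev.sigev_notify = SIGEV_THREAD;
		ev.sigev_notify_function = memwait_fire;
		timer_create(CLOCK_MONOTONIC, &ev, &t); /* like init_timer() */
		when.it_value.tv_sec = 1;               /* like expires = jiffies + HZ */
		timer_settime(t, 0, &when, NULL);       /* like add_timer() */
		sleep(2);                               /* let it fire once */
		return 0;
	}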
1295 
1296 /*
1297  * function:	zfcp_erp_adapter_failed
528 
529 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
530 				      int clear, u8 id, void *ref)
1298 *
1299 * purpose: sets the adapter and all underlying devices to ERP_FAILED
1300 *
1301 */
1302void
1303zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
1304{
1305 zfcp_erp_modify_adapter_status(adapter, id, ref,
1306 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1307 ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n",
1308 zfcp_get_busid_by_adapter(adapter));
1309}
1310
1311/*
1312 * function: zfcp_erp_port_failed
1313 *
1314 * purpose: sets the port and all underlying devices to ERP_FAILED
1315 *
1316 */
1317void
1318zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
1319{
1320 zfcp_erp_modify_port_status(port, id, ref,
1321 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1322
1323 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
1324 ZFCP_LOG_NORMAL("port erp failed (adapter %s, "
1325 "port d_id=0x%06x)\n",
1326 zfcp_get_busid_by_port(port), port->d_id);
1327 else
1328 ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n",
1329 zfcp_get_busid_by_port(port), port->wwpn);
1330}
1331
1332/*
1333 * function: zfcp_erp_unit_failed
1334 *
1335 * purpose: sets the unit to ERP_FAILED
1336 *
1337 */
1338void
1339zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
1340{
1341 zfcp_erp_modify_unit_status(unit, id, ref,
1342 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1343
1344 ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx "
1345 " on adapter %s\n", unit->fcp_lun,
1346 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
1347}
1348
1349/*
1350 * function: zfcp_erp_strategy_check_target
1351 *
1352 * purpose: increments the erp action count on the device currently in
1353 * recovery if the action failed or resets the count in case of
1354 * success. If a maximum count is exceeded the device is marked
1355 * as ERP_FAILED.
1356 * The 'blocked' state of a target which has been recovered
1357 * successfully is reset.
1358 *
1359 * returns: ZFCP_ERP_CONTINUES - action continues (not considered)
1360 * ZFCP_ERP_SUCCEEDED - action finished successfully
1361 * ZFCP_ERP_EXIT - action failed and will not continue
1362 */
1363static int
1364zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, int result)
1365{
1366 struct zfcp_adapter *adapter = erp_action->adapter;
1367 struct zfcp_port *port = erp_action->port;
1368 struct zfcp_unit *unit = erp_action->unit;
1369
1370 switch (erp_action->action) {
1371
1372 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1373 result = zfcp_erp_strategy_check_unit(unit, result);
1374 break;
1375
1376 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1377 case ZFCP_ERP_ACTION_REOPEN_PORT:
1378 result = zfcp_erp_strategy_check_port(port, result);
1379 break;
1380
1381 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1382 result = zfcp_erp_strategy_check_adapter(adapter, result);
1383 break;
1384 }
1385
1386 return result;
1387}
1388
1389static int
1390zfcp_erp_strategy_statechange(int action,
1391 u32 status,
1392 struct zfcp_adapter *adapter,
1393 struct zfcp_port *port,
1394 struct zfcp_unit *unit, int retval)
1395{
1396 switch (action) {
1397
1398 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1399 if (zfcp_erp_strategy_statechange_detected(&adapter->status,
1400 status)) {
1401 zfcp_erp_adapter_reopen_internal(adapter,
1402 ZFCP_STATUS_COMMON_ERP_FAILED,
1403 67, NULL);
1404 retval = ZFCP_ERP_EXIT;
1405 }
1406 break;
1407
1408 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1409 case ZFCP_ERP_ACTION_REOPEN_PORT:
1410 if (zfcp_erp_strategy_statechange_detected(&port->status,
1411 status)) {
1412 zfcp_erp_port_reopen_internal(port,
1413 ZFCP_STATUS_COMMON_ERP_FAILED,
1414 68, NULL);
1415 retval = ZFCP_ERP_EXIT;
1416 }
1417 break;
1418
1419 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1420 if (zfcp_erp_strategy_statechange_detected(&unit->status,
1421 status)) {
1422 zfcp_erp_unit_reopen_internal(unit,
1423 ZFCP_STATUS_COMMON_ERP_FAILED,
1424 69, NULL);
1425 retval = ZFCP_ERP_EXIT;
1426 }
1427 break;
1428 }
1429
1430 return retval;
1431}
1432
1433static int
1434zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status)
1435 {
1436 	return
531 {
532 	struct zfcp_port *port;
1437 /* take it online */
1438 (atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
1439 (ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status)) ||
1440 /* take it offline */
1441 (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
1442 !(ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status));
1443}
1444
1445static int
1446zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
1447{
1448 switch (result) {
1449 case ZFCP_ERP_SUCCEEDED :
1450 atomic_set(&unit->erp_counter, 0);
1451 zfcp_erp_unit_unblock(unit);
1452 break;
1453 case ZFCP_ERP_FAILED :
1454 atomic_inc(&unit->erp_counter);
1455 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
1456 zfcp_erp_unit_failed(unit, 21, NULL);
1457 break;
1458 case ZFCP_ERP_EXIT :
1459 /* nothing */
1460 break;
1461 }
1462
1463 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
1464 zfcp_erp_unit_block(unit, 0); /* for ZFCP_ERP_SUCCEEDED */
1465 result = ZFCP_ERP_EXIT;
1466 }
1467
1468 return result;
1469}
1470
1471static int
1472zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1473{
1474 switch (result) {
1475 case ZFCP_ERP_SUCCEEDED :
1476 atomic_set(&port->erp_counter, 0);
1477 zfcp_erp_port_unblock(port);
1478 break;
1479 case ZFCP_ERP_FAILED :
1480 atomic_inc(&port->erp_counter);
1481 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
1482 zfcp_erp_port_failed(port, 22, NULL);
1483 break;
1484 case ZFCP_ERP_EXIT :
1485 /* nothing */
1486 break;
1487 }
1488
1489 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
1490 zfcp_erp_port_block(port, 0); /* for ZFCP_ERP_SUCCEEDED */
1491 result = ZFCP_ERP_EXIT;
1492 }
1493 
1494 	return result;
1495 }
1496 
1497 static int
1498 zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result)
1499 {
1500 	switch (result) {
533 
534 	list_for_each_entry(port, &adapter->port_list_head, list)
535 		if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA))
536 			_zfcp_erp_port_reopen(port, clear, id, ref);
537 }
538 
539 static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id,
540 				      void *ref)
541 {
542 	struct zfcp_unit *unit;
1501 case ZFCP_ERP_SUCCEEDED :
1502 atomic_set(&adapter->erp_counter, 0);
1503 zfcp_erp_adapter_unblock(adapter);
1504 break;
1505 case ZFCP_ERP_FAILED :
1506 atomic_inc(&adapter->erp_counter);
1507 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
1508 zfcp_erp_adapter_failed(adapter, 23, NULL);
1509 break;
1510 case ZFCP_ERP_EXIT :
1511 /* nothing */
1512 break;
1513 }
1514
1515 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
1516 zfcp_erp_adapter_block(adapter, 0); /* for ZFCP_ERP_SUCCEEDED */
1517 result = ZFCP_ERP_EXIT;
1518 }
1519
1520 return result;
1521}
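[Editor's aside] The three check_* helpers above share one accounting rule: success resets the per-target erp_counter and unblocks the target; each failure increments it, and once it passes ZFCP_MAX_ERPS the target is marked permanently failed. A self-contained sketch of that bounded-retry rule (MAX_ERPS and the struct are illustrative stand-ins):

	#include <stdio.h>

	#define MAX_ERPS 3 /* stand-in for ZFCP_MAX_ERPS */

	struct target { int erp_counter; int failed; };

	/* success resets the counter; each failure increments it until the
	 * target is marked permanently failed */
	static void account_result(struct target *t, int succeeded)
	{
		if (succeeded) {
			t->erp_counter = 0;
			return;
		}
		if (++t->erp_counter > MAX_ERPS)
			t->failed = 1;
	}

	int main(void)
	{
		struct target t = { 0 };
		for (int i = 0; i < 5 && !t.failed; i++)
			account_result(&t, 0);
		printf("failed=%d after %d attempts\n", t.failed, t.erp_counter);
		return 0;
	}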
1522
1523struct zfcp_erp_add_work {
1524 struct zfcp_unit *unit;
1525 struct work_struct work;
1526};
1527 
1528 /**
1529  * zfcp_erp_scsi_scan
543 
544 	list_for_each_entry(unit, &port->unit_list_head, list)
545 		_zfcp_erp_unit_reopen(unit, clear, id, ref);
1530 * @data: pointer to a struct zfcp_erp_add_work
1531 *
1532 * Registers a logical unit with the SCSI stack.
1533 */
1534static void zfcp_erp_scsi_scan(struct work_struct *work)
1535{
1536 struct zfcp_erp_add_work *p =
1537 container_of(work, struct zfcp_erp_add_work, work);
1538 struct zfcp_unit *unit = p->unit;
1539 struct fc_rport *rport = unit->port->rport;
1540 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1541 unit->scsi_lun, 0);
1542 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1543 zfcp_unit_put(unit);
1544 kfree(p);
1545 }
1546 
1547 /**
546 }
547 
548 static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act)
1548 * zfcp_erp_schedule_work
1549 * @unit: pointer to unit which should be registered with SCSI stack
1550 *
1551 * Schedules work which registers a unit with the SCSI stack
1552 */
1553static void
1554zfcp_erp_schedule_work(struct zfcp_unit *unit)
1555 {
1556 	struct zfcp_erp_add_work *p;
1557 
549 {
550 	struct zfcp_adapter *adapter = act->adapter;
551 	struct zfcp_port *port = act->port;
552 	struct zfcp_unit *unit = act->unit;
553 	u32 status = act->status;
554 
1558 p = kzalloc(sizeof(*p), GFP_KERNEL);
1559 if (!p) {
1560 ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
1561 "the FCP-LUN 0x%Lx connected to "
1562 "the port with WWPN 0x%Lx connected to "
1563 "the adapter %s with the SCSI stack.\n",
1564 unit->fcp_lun,
1565 unit->port->wwpn,
1566 zfcp_get_busid_by_unit(unit));
1567 return;
1568 }
1569
1570 zfcp_unit_get(unit);
1571 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1572 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1573 p->unit = unit;
1574 schedule_work(&p->work);
1575}
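[Editor's aside] zfcp_erp_schedule_work() cannot register the unit with the SCSI stack from ERP context, so it takes a reference, wraps the unit in a heap-allocated work item, and lets a worker do the scan and drop the reference. A generic C sketch of that deferral pattern, with a detached thread standing in for the kernel workqueue (all names invented for illustration):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct unit { int refcount; int lun; };

	struct add_work {             /* models struct zfcp_erp_add_work */
		struct unit *unit;
		pthread_t worker;     /* the "workqueue" */
	};

	static void *scan_worker(void *arg)
	{
		struct add_work *p = arg;
		printf("registering LUN %d\n", p->unit->lun); /* the deferred scan */
		p->unit->refcount--;  /* like zfcp_unit_put() */
		free(p);              /* work item owns itself */
		return NULL;
	}

	static void schedule_scan(struct unit *u)
	{
		struct add_work *p = calloc(1, sizeof(*p));
		if (!p)
			return;       /* out of memory: report and give up */
		u->refcount++;        /* keep the unit alive until the work ran */
		p->unit = u;
		pthread_create(&p->worker, NULL, scan_worker, p);
		pthread_detach(p->worker);
	}

	int main(void)
	{
		struct unit u = { .refcount = 1, .lun = 0 };
		schedule_scan(&u);
		pthread_exit(NULL);   /* let the detached worker finish */
	}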
1576
1577/*
1578 * function:
1579 *
1580 * purpose: remaining things in good cases,
1581 * escalation in bad cases
1582 *
1583 * returns:
1584 */
1585static int
1586zfcp_erp_strategy_followup_actions(int action,
1587 struct zfcp_adapter *adapter,
1588 struct zfcp_port *port,
1589 struct zfcp_unit *unit, int status)
1590{
1591 	/* initiate follow-up actions depending on success of finished action */
1592 	switch (action) {
1593 
1594 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1595 		if (status == ZFCP_ERP_SUCCEEDED)
1596 			zfcp_erp_port_reopen_all_internal(adapter, 0, 70, NULL);
1597 		else
1598 			zfcp_erp_adapter_reopen_internal(adapter, 0, 71, NULL);
1599 		break;
1600 
1601 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1602 		if (status == ZFCP_ERP_SUCCEEDED)
1603 			zfcp_erp_port_reopen_internal(port, 0, 72, NULL);
1604 		else
1605 			zfcp_erp_adapter_reopen_internal(adapter, 0, 73, NULL);
1606 		break;
1607 
1608 	case ZFCP_ERP_ACTION_REOPEN_PORT:
1609 		if (status == ZFCP_ERP_SUCCEEDED)
1610 			zfcp_erp_unit_reopen_all_internal(port, 0, 74, NULL);
1611 		else
1612 			zfcp_erp_port_forced_reopen_internal(port, 0, 75, NULL);
1613 		break;
1614 
1615 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
1616 		/* Nothing to do if status == ZFCP_ERP_SUCCEEDED */
1617 		if (status != ZFCP_ERP_SUCCEEDED)
1618 			zfcp_erp_port_reopen_internal(unit->port, 0, 76, NULL);
1619 		break;
1620 	}
1621 
1622 	return 0;
1623 }
1624 
1625 static int
1626 zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter)
1627 {
1628 	unsigned long flags;
1629 
555 	/* initiate follow-up actions depending on success of finished action */
556 	switch (act->action) {
557 
558 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
559 		if (status == ZFCP_ERP_SUCCEEDED)
560 			_zfcp_erp_port_reopen_all(adapter, 0, 70, NULL);
561 		else
562 			_zfcp_erp_adapter_reopen(adapter, 0, 71, NULL);
563 		break;
564 
565 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
566 		if (status == ZFCP_ERP_SUCCEEDED)
567 			_zfcp_erp_port_reopen(port, 0, 72, NULL);
568 		else
569 			_zfcp_erp_adapter_reopen(adapter, 0, 73, NULL);
570 		break;
571 
572 	case ZFCP_ERP_ACTION_REOPEN_PORT:
573 		if (status == ZFCP_ERP_SUCCEEDED)
574 			_zfcp_erp_unit_reopen_all(port, 0, 74, NULL);
575 		else
576 			_zfcp_erp_port_forced_reopen(port, 0, 75, NULL);
577 		break;
578 
579 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
580 		if (status != ZFCP_ERP_SUCCEEDED)
581 			_zfcp_erp_port_reopen(unit->port, 0, 76, NULL);
582 		break;
583 	}
584 }
585 
586 static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
587 {
588 	unsigned long flags;
589 
@@ -1637,1277 +597,622 @@ zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter)
1637 	}
1638 	read_unlock(&adapter->erp_lock);
1639 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1640 
1641 	return 0;
1642 }
1643 
597 	}
598 	read_unlock(&adapter->erp_lock);
599 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
600 }
601 
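[Editor's aside] The follow-up switch above encodes the ERP escalation ladder: a successful recovery walks down to the contained objects (adapter -> ports -> units), while a failure escalates to the next-larger scope. A tiny model of that ladder — a simplified sketch, not the driver's actual decision table:

	#include <stdio.h>

	enum level { UNIT, PORT, PORT_FORCED, ADAPTER };

	/* on success recovery descends to the children; on failure it
	 * escalates to a bigger scope, mirroring the switch above */
	static enum level follow_up(enum level done, int succeeded)
	{
		switch (done) {
		case ADAPTER:
			return succeeded ? PORT : ADAPTER;  /* reopen all ports */
		case PORT_FORCED:
			return succeeded ? PORT : ADAPTER;
		case PORT:
			return succeeded ? UNIT : PORT_FORCED;
		default: /* UNIT */
			return succeeded ? UNIT : PORT;
		}
	}

	int main(void)
	{
		printf("%d\n", follow_up(PORT, 0)); /* failed port -> forced reopen */
		return 0;
	}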
1644 /**
602 static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
1645 * zfcp_erp_wait - wait for completion of error recovery on an adapter
1646 * @adapter: adapter for which to wait for completion of its error recovery
1647 * Return: 0
1648 */
1649int
1650zfcp_erp_wait(struct zfcp_adapter *adapter)
1651 {
1652 	int retval = 0;
1653 
1654 	wait_event(adapter->erp_done_wqh,
1655 		   !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
1656 				     &adapter->status));
1657 
1658 	return retval;
1659 }
1660 
1661 void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
1662 				    void *ref, u32 mask, int set_or_clear)
1663 {
1664 	struct zfcp_port *port;
1665 	u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS;
1666 
1667 	if (set_or_clear == ZFCP_SET) {
1668 		changed = atomic_test_and_set_mask(mask, &adapter->status);
1669 	} else {
1670 		changed = atomic_test_and_clear_mask(mask, &adapter->status);
1671 		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1672 			atomic_set(&adapter->erp_counter, 0);
1673 	}
1674 	if (changed)
1675 		zfcp_rec_dbf_event_adapter(id, ref, adapter);
1676 
1677 	/* Deal with all underlying devices, only pass common_mask */
1678 	if (common_mask)
1679 		list_for_each_entry(port, &adapter->port_list_head, list)
1680 			zfcp_erp_modify_port_status(port, id, ref, common_mask,
1681 						    set_or_clear);
1682 }
1683 
1684 /*
603 {
604 	if (zfcp_qdio_open(act->adapter))
605 		return ZFCP_ERP_FAILED;
606 	init_waitqueue_head(&act->adapter->request_wq);
607 	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
608 	return ZFCP_ERP_SUCCEEDED;
609 }
610 
611 static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
612 {
613 	struct zfcp_port *port;
614 	port = zfcp_port_enqueue(adapter, adapter->peer_wwpn, 0,
615 				 adapter->peer_d_id);
616 	if (IS_ERR(port)) /* error or port already attached */
617 		return;
618 	_zfcp_erp_port_reopen(port, 0, 150, NULL);
619 }
620 
621 static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
1685 * function: zfcp_erp_modify_port_status
1686 *
1687 * purpose: sets the port and all underlying devices to ERP_FAILED
1688 *
1689 */
1690void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
1691 u32 mask, int set_or_clear)
1692 {
1693 	struct zfcp_unit *unit;
1694 	u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS;
1695 
622 {
623 	int retries;
624 	int sleep = 1;
625 	struct zfcp_adapter *adapter = erp_action->adapter;
1696 if (set_or_clear == ZFCP_SET) {
1697 changed = atomic_test_and_set_mask(mask, &port->status);
1698 } else {
1699 changed = atomic_test_and_clear_mask(mask, &port->status);
1700 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1701 atomic_set(&port->erp_counter, 0);
1702 }
1703 if (changed)
1704 zfcp_rec_dbf_event_port(id, ref, port);
1705
1706 /* Modify status of all underlying devices, only pass common mask */
1707 if (common_mask)
1708 list_for_each_entry(unit, &port->unit_list_head, list)
1709 zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
1710 set_or_clear);
1711}
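[Editor's aside] The modify_*_status helpers set or clear a status bitmask on one object and then propagate only the common subset of bits down to the contained objects (adapter to ports, ports to units). A self-contained sketch of that set/clear-with-propagation pattern (the mask values and struct are illustrative):

	#include <stdio.h>

	#define ERP_FAILED  0x01u
	#define COMMON_MASK 0x0fu /* only common bits propagate to children */

	struct node { unsigned status; struct node *child; };

	static void modify_status(struct node *n, unsigned mask, int set)
	{
		if (set)
			n->status |= mask;
		else
			n->status &= ~mask;
		/* pass only the common subset down the object tree */
		if ((mask & COMMON_MASK) && n->child)
			modify_status(n->child, mask & COMMON_MASK, set);
	}

	int main(void)
	{
		struct node unit = { 0 }, port = { 0, &unit }, adapter = { 0, &port };
		modify_status(&adapter, ERP_FAILED, 1);
		printf("unit status 0x%x\n", unit.status); /* bit reached the leaf */
		return 0;
	}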
1712 
1713 /*
626 
627 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
1714 * function: zfcp_erp_modify_unit_status
1715 *
1716 * purpose: sets the unit to ERP_FAILED
1717 *
1718 */
1719void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
1720 u32 mask, int set_or_clear)
1721{
1722 u32 changed;
1723 
1724 	if (set_or_clear == ZFCP_SET) {
1725 		changed = atomic_test_and_set_mask(mask, &unit->status);
1726 	} else {
1727 		changed = atomic_test_and_clear_mask(mask, &unit->status);
1728 		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
1729 			atomic_set(&unit->erp_counter, 0);
1730 		}
628 
629 	for (retries = 7; retries; retries--) {
630 		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
631 				  &adapter->status);
632 		write_lock_irq(&adapter->erp_lock);
633 		zfcp_erp_action_to_running(erp_action);
634 		write_unlock_irq(&adapter->erp_lock);
635 		if (zfcp_fsf_exchange_config_data(erp_action)) {
636 			atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
637 					  &adapter->status);
638 			return ZFCP_ERP_FAILED;
639 		}
1731 }
1732 if (changed)
1733 zfcp_rec_dbf_event_unit(id, ref, unit);
1734}
1735 
1736 /*
1737  * function:
1738  *
1739  * purpose:	Wrapper for zfcp_erp_port_reopen_all_internal
1740  *		used to ensure the correct locking
640 
641 		zfcp_rec_dbf_event_thread_lock(6, adapter);
642 		down(&adapter->erp_ready_sem);
643 		zfcp_rec_dbf_event_thread_lock(7, adapter);
644 		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
645 			break;
1741 *
1742 * returns: 0 - initiated action successfully
1743 * <0 - failed to initiate action
1744 */
1745int zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask,
1746 u8 id, void *ref)
1747{
1748 int retval;
1749 unsigned long flags;
1750
1751 read_lock_irqsave(&zfcp_data.config_lock, flags);
1752 write_lock(&adapter->erp_lock);
1753 retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask, id,
1754 ref);
1755 write_unlock(&adapter->erp_lock);
1756 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1757
1758 return retval;
1759}
1760 
1761 static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter,
1762 					     int clear_mask, u8 id, void *ref)
1763 {
646 
647 		if (!(atomic_read(&adapter->status) &
648 		      ZFCP_STATUS_ADAPTER_HOST_CON_INIT))
649 			break;
1764 int retval = 0;
1765 struct zfcp_port *port;
1766 
1767 	list_for_each_entry(port, &adapter->port_list_head, list)
1768 		if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
1769 			zfcp_erp_port_reopen_internal(port, clear_mask, id,
1770 						      ref);
1771 
1772 	return retval;
1773 }
1774 
1775 /*
1776  * function:
650 
651 		ssleep(sleep);
652 		sleep *= 2;
653 	}
654 
655 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
656 			  &adapter->status);
657 
658 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
659 		return ZFCP_ERP_FAILED;
1777 *
1778 * purpose:
1779 *
1780 * returns: FIXME
1781 */
1782static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port,
1783 int clear_mask, u8 id, void *ref)
1784{
1785 int retval = 0;
1786 struct zfcp_unit *unit;
1787 
1788 	list_for_each_entry(unit, &port->unit_list_head, list)
1789 		zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref);
1790 
1791 	return retval;
1792 }
1793 
660 
661 	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
662 		zfcp_erp_enqueue_ptp_port(adapter);
663 
664 	return ZFCP_ERP_SUCCEEDED;
665 }
666 
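[Editor's aside] The rewritten exchange-config loop retries up to seven times while the host connection is still initialising, doubling the sleep between attempts (1s, 2s, 4s, ...). A minimal sketch of that exponential-backoff retry loop; the fake exchange_config() stands in for the FSF request:

	#include <stdio.h>
	#include <unistd.h>

	/* retry a step up to 7 times, doubling the delay: 1s, 2s, 4s, ... */
	static int exchange_config(int attempt)
	{
		return attempt < 3 ? -1 : 0; /* pretend the first tries fail */
	}

	int main(void)
	{
		int sleep_s = 1, attempt = 0;

		for (int retries = 7; retries; retries--) {
			if (exchange_config(attempt++) == 0) {
				puts("link data exchanged");
				return 0;
			}
			sleep(sleep_s);
			sleep_s *= 2;
		}
		return 1; /* give up, like ZFCP_ERP_FAILED */
	}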
1794 /*
667 static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
1795 * function:
1796 *
1797 * purpose: this routine executes the 'Reopen Adapter' action
1798 * (the entire action is processed synchronously, since
1799 * there are no actions which might be run concurrently
1800 * per definition)
1801 *
1802 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
1803 * ZFCP_ERP_FAILED - action finished unsuccessfully
1804 */
1805static int
1806zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
1807 {
1808 	int retval;
1809 	struct zfcp_adapter *adapter = erp_action->adapter;
668 {
669 	int ret;
670 	struct zfcp_adapter *adapter = act->adapter;
1810
1811 retval = zfcp_erp_adapter_strategy_close(erp_action);
1812 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
1813 retval = ZFCP_ERP_EXIT;
1814 else
1815 retval = zfcp_erp_adapter_strategy_open(erp_action);
1816 
1817 	if (retval == ZFCP_ERP_FAILED) {
671 
672 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
1818 ZFCP_LOG_INFO("Waiting to allow the adapter %s "
1819 "to recover itself\n",
1820 zfcp_get_busid_by_adapter(adapter));
1821 ssleep(ZFCP_TYPE2_RECOVERY_TIME);
1822 }
1823 
1824 	return retval;
1825 }
1826 
1827 /*
1828  * function:
1829  *
1830  * purpose:
1831  *
673 
674 	write_lock_irq(&adapter->erp_lock);
675 	zfcp_erp_action_to_running(act);
676 	write_unlock_irq(&adapter->erp_lock);
677 
678 	ret = zfcp_fsf_exchange_port_data(act);
679 	if (ret == -EOPNOTSUPP)
680 		return ZFCP_ERP_SUCCEEDED;
681 	if (ret)
682 		return ZFCP_ERP_FAILED;
1832 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
1833 * ZFCP_ERP_FAILED - action finished unsuccessfully
1834 */
1835static int
1836zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *erp_action)
1837{
1838 int retval;
1839 
1840 	atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING,
1841 			&erp_action->adapter->status);
1842 	retval = zfcp_erp_adapter_strategy_generic(erp_action, 1);
1843 	atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING,
1844 			  &erp_action->adapter->status);
1845 
1846 	return retval;
1847 }
1848 
683 
684 	zfcp_rec_dbf_event_thread_lock(8, adapter);
685 	down(&adapter->erp_ready_sem);
686 	zfcp_rec_dbf_event_thread_lock(9, adapter);
687 	if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
688 		return ZFCP_ERP_FAILED;
689 
690 	return ZFCP_ERP_SUCCEEDED;
691 }
692 
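[Editor's aside] Both the old "Why this works" comment (removed further up) and the new xconf/xport code rely on the same handshake: the request's completion handler and its timeout handler both end in an up() on erp_ready_sem, so the strategy can block in down() without ever missing the wakeup. A userspace sketch of that completion-semaphore handshake (illustrative names, not kernel API):

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>

	static sem_t ready_sem;   /* models adapter->erp_ready_sem */
	static int timed_out;     /* models ZFCP_STATUS_ERP_TIMEDOUT */

	static void *fsf_request(void *unused)
	{
		/* both the completion handler and the timeout handler end in
		 * a post on the same semaphore, so the waiter cannot miss it */
		sem_post(&ready_sem);
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;
		sem_init(&ready_sem, 0, 0);
		pthread_create(&tid, NULL, fsf_request, NULL);
		sem_wait(&ready_sem);          /* like down(&adapter->erp_ready_sem) */
		puts(timed_out ? "request timed out" : "request completed");
		pthread_join(tid, NULL);
		return 0;
	}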
1849 /*
693 static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
1850 * function:
1851 *
1852 * purpose:
1853 *
1854 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
1855 * ZFCP_ERP_FAILED - action finished unsuccessfully
1856 */
1857static int
1858zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *erp_action)
1859 {
1860 	int retval;
1861 
1862 	atomic_set_mask(ZFCP_STATUS_COMMON_OPENING,
1863 			&erp_action->adapter->status);
1864 	retval = zfcp_erp_adapter_strategy_generic(erp_action, 0);
1865 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING,
1866 			  &erp_action->adapter->status);
1867 
1868 	return retval;
1869 }
1870 
1871 /*
1872  * function: zfcp_register_adapter
694 {
695 	if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
696 		return ZFCP_ERP_FAILED;
697 
698 	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
699 		return ZFCP_ERP_FAILED;
700 
701 	atomic_set(&act->adapter->stat_miss, 16);
702 	if (zfcp_status_read_refill(act->adapter))
703 		return ZFCP_ERP_FAILED;
704 
705 	return ZFCP_ERP_SUCCEEDED;
706 }
707 
708 static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
709 					     int close)
1873 *
1874 * purpose: allocate the irq associated with this devno and register
1875 * the FSF adapter with the SCSI stack
1876 *
1877 * returns:
1878 */
1879static int
1880zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
1881 {
1882 	int retval = ZFCP_ERP_SUCCEEDED;
710 {
711 	int retval = ZFCP_ERP_SUCCEEDED;
712 	struct zfcp_adapter *adapter = act->adapter;
1883 
1884 	if (close)
1885 		goto close_only;
1886 
1887 	retval = zfcp_erp_adapter_strategy_open_qdio(erp_action);
1888 	if (retval != ZFCP_ERP_SUCCEEDED)
1889 		goto failed_qdio;
1890 
1891 	retval = zfcp_erp_adapter_strategy_open_fsf(erp_action);
1892 	if (retval != ZFCP_ERP_SUCCEEDED)
1893 		goto failed_openfcp;
1894 
1895 	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &erp_action->adapter->status);
1896 	goto out;
1897 
1898  close_only:
1899 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1900 			  &erp_action->adapter->status);
1901 
1902  failed_openfcp:
1903 	zfcp_close_fsf(erp_action->adapter);
1904  failed_qdio:
1905 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
1906 			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
1907 			  ZFCP_STATUS_ADAPTER_XPORT_OK,
1908 			  &erp_action->adapter->status);
1909  out:
1910 	return retval;
1911 }
1912 
713 
714 	if (close)
715 		goto close_only;
716 
717 	retval = zfcp_erp_adapter_strategy_open_qdio(act);
718 	if (retval != ZFCP_ERP_SUCCEEDED)
719 		goto failed_qdio;
720 
721 	retval = zfcp_erp_adapter_strategy_open_fsf(act);
722 	if (retval != ZFCP_ERP_SUCCEEDED)
723 		goto failed_openfcp;
724 
725 	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
726 	schedule_work(&act->adapter->scan_work);
727 
728 	return ZFCP_ERP_SUCCEEDED;
729 
730  close_only:
731 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
732 			  &act->adapter->status);
733 
734  failed_openfcp:
735 	/* close queues to ensure that buffers are not accessed by adapter */
736 	zfcp_qdio_close(adapter);
737 	zfcp_fsf_req_dismiss_all(adapter);
738 	adapter->fsf_req_seq_no = 0;
739 	/* all ports and units are closed */
740 	zfcp_erp_modify_adapter_status(adapter, 24, NULL,
741 				       ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
742  failed_qdio:
743 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
744 			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
745 			  ZFCP_STATUS_ADAPTER_XPORT_OK,
746 			  &act->adapter->status);
747 	return retval;
748 }
749 
1913 /*
750 static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
1914 * function: zfcp_qdio_init
1915 *
1916 * purpose: setup QDIO operation for specified adapter
1917 *
1918 * returns: 0 - successful setup
1919 * !0 - failed setup
1920 */
1921static int
1922zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
1923 {
1924 	int retval;
751 {
752 	int retval;
1925 int i;
1926 volatile struct qdio_buffer_element *sbale;
1927 struct zfcp_adapter *adapter = erp_action->adapter;
1928
1929 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
1930 ZFCP_LOG_NORMAL("bug: second attempt to set up QDIO on "
1931 "adapter %s\n",
1932 zfcp_get_busid_by_adapter(adapter));
1933 goto failed_sanity;
1934 }
1935
1936 if (qdio_establish(&adapter->qdio_init_data) != 0) {
1937 ZFCP_LOG_INFO("error: establishment of QDIO queues failed "
1938 "on adapter %s\n",
1939 zfcp_get_busid_by_adapter(adapter));
1940 goto failed_qdio_establish;
1941 }
1942
1943 if (qdio_activate(adapter->ccw_device, 0) != 0) {
1944 ZFCP_LOG_INFO("error: activation of QDIO queues failed "
1945 "on adapter %s\n",
1946 zfcp_get_busid_by_adapter(adapter));
1947 goto failed_qdio_activate;
1948 }
1949
1950 /*
1951 * put buffers into response queue,
1952 */
1953 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
1954 sbale = &(adapter->response_queue.buffer[i]->element[0]);
1955 sbale->length = 0;
1956 sbale->flags = SBAL_FLAGS_LAST_ENTRY;
1957 sbale->addr = NULL;
1958 }
1959
1960 ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
1961 "queue_no=%i, index_in_queue=%i, count=%i)\n",
1962 zfcp_get_busid_by_adapter(adapter),
1963 QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q);
1964
1965 retval = do_QDIO(adapter->ccw_device,
1966 QDIO_FLAG_SYNC_INPUT,
1967 0, 0, QDIO_MAX_BUFFERS_PER_Q, NULL);
1968
1969 if (retval) {
1970 ZFCP_LOG_NORMAL("bug: setup of QDIO failed (retval=%d)\n",
1971 retval);
1972 goto failed_do_qdio;
1973 } else {
1974 adapter->response_queue.free_index = 0;
1975 atomic_set(&adapter->response_queue.free_count, 0);
1976 ZFCP_LOG_DEBUG("%i buffers successfully enqueued to "
1977 "response queue\n", QDIO_MAX_BUFFERS_PER_Q);
1978 }
1979 	/* set index of first available SBALS / number of available SBALS */
1980 adapter->request_queue.free_index = 0;
1981 atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q);
1982 adapter->request_queue.distance_from_int = 0;
1983
1984 /* initialize waitqueue used to wait for free SBALs in requests queue */
1985 init_waitqueue_head(&adapter->request_wq);
1986 
1987 	/* ok, we did it - skip all cleanups for different failures */
1988 	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
1989 	retval = ZFCP_ERP_SUCCEEDED;
1990 	goto out;
1991 
1992  failed_do_qdio:
1993 	/* NOP */
1994 
1995  failed_qdio_activate:
1996 	while (qdio_shutdown(adapter->ccw_device,
753 
754 	atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
755 	zfcp_erp_adapter_strategy_generic(act, 1); /* close */
756 	atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
757 	if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
758 		return ZFCP_ERP_EXIT;
759 
760 	atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
761 	retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
762 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
763 
764 	if (retval == ZFCP_ERP_FAILED)
765 		ssleep(8);
1997 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
1998 ssleep(1);
1999
2000 failed_qdio_establish:
2001 failed_sanity:
2002 retval = ZFCP_ERP_FAILED;
2003 
2004  out:
2005 	return retval;
2006 }
2007 
2008 
766 
767 	return retval;
768 }
769 
770 static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
2009static int
2010zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2011 {
2012 	int retval;
2013 
2014 	retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
2015 	if (retval == ZFCP_ERP_FAILED)
2016 		return ZFCP_ERP_FAILED;
2017 
2018 	retval = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2019 	if (retval == ZFCP_ERP_FAILED)
2020 		return ZFCP_ERP_FAILED;
2021 
2022 	return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2023 }
2024 
2025 static int
771 {
772 	int retval;
773 
774 	retval = zfcp_fsf_close_physical_port(act);
775 	if (retval == -ENOMEM)
776 		return ZFCP_ERP_NOMEM;
777 	act->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
778 	if (retval)
779 		return ZFCP_ERP_FAILED;
780 
781 	return ZFCP_ERP_CONTINUES;
782 }
783 
784 static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
2026zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2027 {
2028 	int retval = ZFCP_ERP_SUCCEEDED;
2029 	int retries;
2030 	int sleep = ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP;
2031 	struct zfcp_adapter *adapter = erp_action->adapter;
2032 
2033 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
2034 
2035 	for (retries = ZFCP_EXCHANGE_CONFIG_DATA_RETRIES; retries; retries--) {
785 {
786 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
787 			  ZFCP_STATUS_COMMON_CLOSING |
788 			  ZFCP_STATUS_COMMON_ACCESS_DENIED |
789 			  ZFCP_STATUS_PORT_DID_DID |
790 			  ZFCP_STATUS_PORT_PHYS_CLOSING |
791 			  ZFCP_STATUS_PORT_INVALID_WWPN,
792 			  &port->status);
793 }
2036 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2037 &adapter->status);
2038 ZFCP_LOG_DEBUG("Doing exchange config data\n");
2039 write_lock_irq(&adapter->erp_lock);
2040 zfcp_erp_action_to_running(erp_action);
2041 write_unlock_irq(&adapter->erp_lock);
2042 if (zfcp_fsf_exchange_config_data(erp_action)) {
2043 retval = ZFCP_ERP_FAILED;
2044 ZFCP_LOG_INFO("error: initiation of exchange of "
2045 "configuration data failed for "
2046 "adapter %s\n",
2047 zfcp_get_busid_by_adapter(adapter));
2048 break;
2049 }
2050 ZFCP_LOG_DEBUG("Xchange underway\n");
2051
2052 /*
2053 * Why this works:
2054 * Both the normal completion handler as well as the timeout
2055 * handler will do an 'up' when the 'exchange config data'
2056 * request completes or times out. Thus, the signal to go on
2057 * won't be lost utilizing this semaphore.
2058 * Furthermore, this 'adapter_reopen' action is
2059 * guaranteed to be the only action being there (highest action
2060 * which prevents other actions from being created).
2061 * Resulting from that, the wake signal recognized here
2062 * _must_ be the one belonging to the 'exchange config
2063 * data' request.
2064 */
2065 zfcp_rec_dbf_event_thread(6, adapter, 1);
2066 down(&adapter->erp_ready_sem);
2067 zfcp_rec_dbf_event_thread(7, adapter, 1);
2068 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2069 ZFCP_LOG_INFO("error: exchange of configuration data "
2070 "for adapter %s timed out\n",
2071 zfcp_get_busid_by_adapter(adapter));
2072 break;
2073 }
2074
2075 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2076 &adapter->status))
2077 break;
2078 
2079 		ZFCP_LOG_DEBUG("host connection still initialising... "
2080 			       "waiting and retrying...\n");
2081 		/* sleep a little bit before retry */
2082 		ssleep(sleep);
794 
795 static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
796 {
797 	struct zfcp_port *port = erp_action->port;
798 	int status = atomic_read(&port->status);
2083 sleep *= 2;
2084 }
2085 
2086 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2087 			  &adapter->status);
799 
800 	switch (erp_action->step) {
801 	case ZFCP_ERP_STEP_UNINITIALIZED:
802 		zfcp_erp_port_strategy_clearstati(port);
803 		if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
804 		    (status & ZFCP_STATUS_COMMON_OPEN))
805 			return zfcp_erp_port_forced_strategy_close(erp_action);
806 		else
807 			return ZFCP_ERP_FAILED;
2088 
2089 	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
2090 			      &adapter->status)) {
2091 		ZFCP_LOG_INFO("error: exchange of configuration data for "
2092 			      "adapter %s failed\n",
2093 			      zfcp_get_busid_by_adapter(adapter));
2094 		retval = ZFCP_ERP_FAILED;
2095 	}
2096 
2097 	return retval;
2098 }
808 
809 	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
810 		if (status & ZFCP_STATUS_PORT_PHYS_OPEN)
811 			return ZFCP_ERP_SUCCEEDED;
812 	}
813 	return ZFCP_ERP_FAILED;
814 }
2099 
2100 static int
815 
816 static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
2101zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2102 {
2103 	int ret;
817 {
818 	int retval;
2104 struct zfcp_adapter *adapter;
2105
2106 adapter = erp_action->adapter;
2107 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2108
2109 write_lock_irq(&adapter->erp_lock);
2110 zfcp_erp_action_to_running(erp_action);
2111 write_unlock_irq(&adapter->erp_lock);
2112 
2113 	ret = zfcp_fsf_exchange_port_data(erp_action);
2114 	if (ret == -EOPNOTSUPP) {
2115 		return ZFCP_ERP_SUCCEEDED;
2116 	} else if (ret) {
2117 		return ZFCP_ERP_FAILED;
2118 	}
819 
820 	retval = zfcp_fsf_close_port(erp_action);
821 	if (retval == -ENOMEM)
822 		return ZFCP_ERP_NOMEM;
823 	erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
824 	if (retval)
825 		return ZFCP_ERP_FAILED;
826 	return ZFCP_ERP_CONTINUES;
2119
2120 ret = ZFCP_ERP_SUCCEEDED;
2121 zfcp_rec_dbf_event_thread(8, adapter, 1);
2122 down(&adapter->erp_ready_sem);
2123 zfcp_rec_dbf_event_thread(9, adapter, 1);
2124 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2125 ZFCP_LOG_INFO("error: exchange port data timed out (adapter "
2126 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2127 ret = ZFCP_ERP_FAILED;
2128 }
2129
2130 /* don't treat as error for the sake of compatibility */
2131 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
2132 ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
2133 "%s\n", zfcp_get_busid_by_adapter(adapter));
2134
2135 return ret;
2136 }
2137 
2138 static int
827 }
828 
829 static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
2139zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2140 *erp_action)
2141 {
2142 	int retval = ZFCP_ERP_SUCCEEDED;
830 {
831 	int retval;
2143 int temp_ret;
2144 struct zfcp_adapter *adapter = erp_action->adapter;
2145 int i;
2146
2147 adapter->status_read_failed = 0;
2148 for (i = 0; i < ZFCP_STATUS_READS_RECOM; i++) {
2149 temp_ret = zfcp_fsf_status_read(adapter, ZFCP_WAIT_FOR_SBAL);
2150 if (temp_ret < 0) {
2151 ZFCP_LOG_INFO("error: set-up of unsolicited status "
2152 "notification failed on adapter %s\n",
2153 zfcp_get_busid_by_adapter(adapter));
2154 retval = ZFCP_ERP_FAILED;
2155 i--;
2156 break;
2157 }
2158 }
2159 
2160 	return retval;
2161 }
2162 
2163 /*
832 
833 	retval = zfcp_fsf_open_port(erp_action);
834 	if (retval == -ENOMEM)
835 		return ZFCP_ERP_NOMEM;
836 	erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
837 	if (retval)
838 		return ZFCP_ERP_FAILED;
839 	return ZFCP_ERP_CONTINUES;
840 }
841 
842 static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act)
2164 * function:
2165 *
2166 * purpose: this routine executes the 'Reopen Physical Port' action
2167 *
2168 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2169 * ZFCP_ERP_SUCCEEDED - action finished successfully
2170 * ZFCP_ERP_FAILED - action finished unsuccessfully
2171 */
2172static int
2173zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
2174 {
2175 	int retval = ZFCP_ERP_FAILED;
2176 	struct zfcp_port *port = erp_action->port;
2177 
2178 	switch (erp_action->step) {
843 {
844 	unsigned long flags;
845 	struct zfcp_adapter *adapter = ns_act->adapter;
846 	struct zfcp_erp_action *act, *tmp;
847 	int status;
2179
2180 /*
2181 * FIXME:
2182 	 * the ULP spec. begs for waiting for outstanding commands
2183 */
2184 case ZFCP_ERP_STEP_UNINITIALIZED:
2185 zfcp_erp_port_strategy_clearstati(port);
2186 /*
2187 * it would be sufficient to test only the normal open flag
2188 * since the phys. open flag cannot be set if the normal
2189 		 * open flag is unset - however, this is for readability ...
2190 */
2191 if (atomic_test_mask((ZFCP_STATUS_PORT_PHYS_OPEN |
2192 ZFCP_STATUS_COMMON_OPEN),
2193 &port->status)) {
2194 ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
2195 "close physical\n", port->wwpn);
2196 retval =
2197 zfcp_erp_port_forced_strategy_close(erp_action);
2198 } else
2199 retval = ZFCP_ERP_FAILED;
2200 break;
2201 
2202 	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2203 		if (atomic_test_mask(ZFCP_STATUS_PORT_PHYS_OPEN,
2204 				     &port->status)) {
2205 			ZFCP_LOG_DEBUG("close physical failed for port "
2206 				       "0x%016Lx\n", port->wwpn);
2207 			retval = ZFCP_ERP_FAILED;
2208 		} else
2209 			retval = ZFCP_ERP_SUCCEEDED;
2210 		break;
2211 	}
2212 
2213 	return retval;
2214 }
2215 
848 
849 	read_lock_irqsave(&adapter->erp_lock, flags);
850 	list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) {
851 		if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
852 			status = atomic_read(&adapter->nameserver_port->status);
853 			if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
854 				zfcp_erp_port_failed(act->port, 27, NULL);
855 			zfcp_erp_action_ready(act);
856 		}
857 	}
858 	read_unlock_irqrestore(&adapter->erp_lock, flags);
859 }
860 
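[Editor's aside] Both versions of the port strategies are step machines: each invocation performs one step, either finishing with SUCCEEDED/FAILED or arming the next step and returning CONTINUES so the asynchronous completion can re-run the action. A compact sketch of that structure (the states and fields are invented for illustration, not the driver's full table):

	#include <stdio.h>

	enum step { UNINITIALIZED, CLOSING, OPENING };
	enum result { CONTINUES, SUCCEEDED, FAILED };

	struct action { enum step step; int open; };

	/* each call performs one step and either finishes or arms the next
	 * step and returns CONTINUES until an async completion re-runs it */
	static enum result strategy(struct action *a)
	{
		switch (a->step) {
		case UNINITIALIZED:
			if (!a->open)
				return FAILED;
			a->step = CLOSING;   /* issue close, wait for completion */
			return CONTINUES;
		case CLOSING:
			a->open = 0;         /* completion confirmed the close */
			return SUCCEEDED;
		default:
			return FAILED;
		}
	}

	int main(void)
	{
		struct action a = { UNINITIALIZED, 1 };
		while (strategy(&a) == CONTINUES)
			; /* the ERP thread would requeue the action here */
		printf("final state open=%d\n", a.open);
		return 0;
	}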
2216 /*
861 static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act)
2217 * function:
2218 *
2219 * purpose: this routine executes the 'Reopen Port' action
2220 *
2221 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2222 * ZFCP_ERP_SUCCEEDED - action finished successfully
2223 * ZFCP_ERP_FAILED - action finished unsuccessfully
2224 */
2225static int
2226zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
2227 {
2228 	int retval = ZFCP_ERP_FAILED;
862 {
863 	int retval;
2229 struct zfcp_port *port = erp_action->port;
2230
2231 switch (erp_action->step) {
2232 864
2233 	/*
2234 	 * FIXME:
2235 	 * the ULP spec. begs for waiting for outstanding commands
2236 	 */
2237 	case ZFCP_ERP_STEP_UNINITIALIZED:
2238 		zfcp_erp_port_strategy_clearstati(port);
865 	switch (act->step) {
866 	case ZFCP_ERP_STEP_UNINITIALIZED:
867 	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2239 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
2240 ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
2241 "close\n", port->wwpn);
2242 retval = zfcp_erp_port_strategy_close(erp_action);
2243 goto out;
2244 } /* else it's already closed, open it */
2245 break;
2246
2247 	case ZFCP_ERP_STEP_PORT_CLOSING:
2248 		if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
2249 			ZFCP_LOG_DEBUG("close failed for port 0x%016Lx\n",
2250 				       port->wwpn);
2251 			retval = ZFCP_ERP_FAILED;
2252 			goto out;
2253 		} /* else it's closed now, open it */
2254 		break;
2255 	}
2256 	if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
2257 		retval = ZFCP_ERP_EXIT;
2258 	else
2259 		retval = zfcp_erp_port_strategy_open(erp_action);
2260 
2261  out:
2262 	return retval;
2263 }
868 	case ZFCP_ERP_STEP_PORT_CLOSING:
869 		return zfcp_erp_port_strategy_open_port(act);
870 
871 	case ZFCP_ERP_STEP_PORT_OPENING:
872 		if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN)
873 			retval = ZFCP_ERP_SUCCEEDED;
874 		else
875 			retval = ZFCP_ERP_FAILED;
876 		/* this is needed anyway */
877 		zfcp_erp_port_strategy_open_ns_wake(act);
878 		return retval;
879 
880 	default:
881 		return ZFCP_ERP_FAILED;
882 	}
883 }
2264 
2265 static int
2266 zfcp_erp_port_strategy_open(struct zfcp_erp_action *erp_action)
2267 {
2268 	int retval;
2269 
884 
885 static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act)
886 {
887 	int retval;
888 
2270 	if (atomic_test_mask(ZFCP_STATUS_PORT_WKA,
2271 			     &erp_action->port->status))
2272 		retval = zfcp_erp_port_strategy_open_nameserver(erp_action);
2273 	else
2274 		retval = zfcp_erp_port_strategy_open_common(erp_action);
2275 
2276 	return retval;
2277 }
889 	retval = zfcp_fc_ns_gid_pn_request(act);
890 	if (retval == -ENOMEM)
891 		return ZFCP_ERP_NOMEM;
892 	act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
893 	if (retval)
894 		return ZFCP_ERP_FAILED;
895 	return ZFCP_ERP_CONTINUES;
2278 
2279 static int
2280 zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action)
2281 {
2282 	int retval = 0;
2283 	struct zfcp_adapter *adapter = erp_action->adapter;
2284 	struct zfcp_port *port = erp_action->port;
2285 
896 }
897 
898 static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
899 {
900 	struct zfcp_adapter *adapter = act->adapter;
901 	struct zfcp_port *port = act->port;
902 
2286 	switch (erp_action->step) {
2287 
903 	if (port->wwpn != adapter->peer_wwpn) {
904 		dev_err(&adapter->ccw_device->dev,
905 			"Failed to open port 0x%016Lx, "
906 			"Peer WWPN 0x%016Lx does not "
907 			"match.\n", port->wwpn,
908 			adapter->peer_wwpn);
909 		zfcp_erp_port_failed(port, 25, NULL);
910 		return ZFCP_ERP_FAILED;
911 	}
912 	port->d_id = adapter->peer_d_id;
913 	atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
914 	return zfcp_erp_port_strategy_open_port(act);
915 }
916 
917 static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
918 {
919 	struct zfcp_adapter *adapter = act->adapter;
920 	struct zfcp_port *port = act->port;
921 	struct zfcp_port *ns_port = adapter->nameserver_port;
922 	int p_status = atomic_read(&port->status);
923 
2288 	case ZFCP_ERP_STEP_UNINITIALIZED:
2289 	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2290 	case ZFCP_ERP_STEP_PORT_CLOSING:
2291 		if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) {
2292 			if (port->wwpn != adapter->peer_wwpn) {
2293 				ZFCP_LOG_NORMAL("Failed to open port 0x%016Lx "
2294 						"on adapter %s.\nPeer WWPN "
2295 						"0x%016Lx does not match\n",
2296 						port->wwpn,
2297 						zfcp_get_busid_by_adapter(adapter),
2298 						adapter->peer_wwpn);
2299 				zfcp_erp_port_failed(port, 25, NULL);
2300 				retval = ZFCP_ERP_FAILED;
2301 				break;
2302 			}
2303 			port->d_id = adapter->peer_d_id;
2304 			atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
2305 			retval = zfcp_erp_port_strategy_open_port(erp_action);
2306 			break;
2307 		}
2308 		if (!(adapter->nameserver_port)) {
2309 			retval = zfcp_nameserver_enqueue(adapter);
2310 			if (retval != 0) {
2311 				ZFCP_LOG_NORMAL("error: nameserver port "
2312 						"unavailable for adapter %s\n",
2313 						zfcp_get_busid_by_adapter(adapter));
2314 				retval = ZFCP_ERP_FAILED;
2315 				break;
2316 			}
2317 		}
2318 		if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
2319 				      &adapter->nameserver_port->status)) {
2320 			ZFCP_LOG_DEBUG("nameserver port is not open -> open "
2321 				       "nameserver port\n");
2322 			/* nameserver port may live again */
2323 			atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
2324 					&adapter->nameserver_port->status);
2325 			if (zfcp_erp_port_reopen(adapter->nameserver_port, 0,
2326 						 77, erp_action) >= 0) {
2327 				erp_action->step =
2328 					ZFCP_ERP_STEP_NAMESERVER_OPEN;
2329 				retval = ZFCP_ERP_CONTINUES;
2330 			} else
2331 				retval = ZFCP_ERP_FAILED;
2332 			break;
2333 		}
2334 		/* else nameserver port is already open, fall through */
924 	switch (act->step) {
925 	case ZFCP_ERP_STEP_UNINITIALIZED:
926 	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
927 	case ZFCP_ERP_STEP_PORT_CLOSING:
928 		if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
929 			return zfcp_erp_open_ptp_port(act);
930 		if (!ns_port) {
931 			dev_err(&adapter->ccw_device->dev,
932 				"Nameserver port unavailable.\n");
933 			return ZFCP_ERP_FAILED;
934 		}
935 		if (!(atomic_read(&ns_port->status) &
936 		      ZFCP_STATUS_COMMON_UNBLOCKED)) {
937 			/* nameserver port may live again */
938 			atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
939 					&ns_port->status);
940 			if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) {
941 				act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
942 				return ZFCP_ERP_CONTINUES;
943 			}
944 			return ZFCP_ERP_FAILED;
945 		}
946 		/* else nameserver port is already open, fall through */
2335 	case ZFCP_ERP_STEP_NAMESERVER_OPEN:
2336 		if (!atomic_test_mask(ZFCP_STATUS_COMMON_OPEN,
2337 				      &adapter->nameserver_port->status)) {
2338 			ZFCP_LOG_DEBUG("open failed for nameserver port\n");
2339 			retval = ZFCP_ERP_FAILED;
2340 		} else {
2341 			ZFCP_LOG_DEBUG("nameserver port is open -> "
2342 				       "nameserver look-up for port 0x%016Lx\n",
2343 				       port->wwpn);
2344 			retval = zfcp_erp_port_strategy_open_common_lookup
2345 				(erp_action);
2346 		}
2347 		break;
2348 
2349 	case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
2350 		if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) {
2351 			if (atomic_test_mask
2352 			    (ZFCP_STATUS_PORT_INVALID_WWPN, &port->status)) {
2353 				ZFCP_LOG_DEBUG("nameserver look-up failed "
2354 					       "for port 0x%016Lx "
2355 					       "(misconfigured WWPN?)\n",
2356 					       port->wwpn);
2357 				zfcp_erp_port_failed(port, 26, NULL);
2358 				retval = ZFCP_ERP_EXIT;
2359 			} else {
2360 				ZFCP_LOG_DEBUG("nameserver look-up failed for "
2361 					       "port 0x%016Lx\n", port->wwpn);
2362 				retval = ZFCP_ERP_FAILED;
2363 			}
2364 		} else {
2365 			ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> "
2366 				       "trying open\n", port->wwpn, port->d_id);
2367 			retval = zfcp_erp_port_strategy_open_port(erp_action);
2368 		}
2369 		break;
2370 
2371 	case ZFCP_ERP_STEP_PORT_OPENING:
2372 		/* D_ID might have changed during open */
2373 		if (atomic_test_mask((ZFCP_STATUS_COMMON_OPEN |
2374 				      ZFCP_STATUS_PORT_DID_DID),
2375 				     &port->status)) {
2376 			ZFCP_LOG_DEBUG("port 0x%016Lx is open\n", port->wwpn);
2377 			retval = ZFCP_ERP_SUCCEEDED;
2378 		} else {
2379 			ZFCP_LOG_DEBUG("open failed for port 0x%016Lx\n",
2380 				       port->wwpn);
2381 			retval = ZFCP_ERP_FAILED;
2382 		}
2383 		break;
2384 
2385 	default:
2386 		ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
2387 				erp_action->step);
2388 		retval = ZFCP_ERP_FAILED;
2389 	}
2390 
2391 	return retval;
2392 }
947 	case ZFCP_ERP_STEP_NAMESERVER_OPEN:
948 		if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN))
949 			return ZFCP_ERP_FAILED;
950 		return zfcp_erp_port_strategy_open_lookup(act);
951 
952 	case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
953 		if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
954 			if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
955 				zfcp_erp_port_failed(port, 26, NULL);
956 				return ZFCP_ERP_EXIT;
957 			}
958 			return ZFCP_ERP_FAILED;
959 		}
960 		return zfcp_erp_port_strategy_open_port(act);
961 
962 	case ZFCP_ERP_STEP_PORT_OPENING:
963 		/* D_ID might have changed during open */
964 		if ((p_status & ZFCP_STATUS_COMMON_OPEN) &&
965 		    (p_status & ZFCP_STATUS_PORT_DID_DID))
966 			return ZFCP_ERP_SUCCEEDED;
967 		/* fall through otherwise */
968 	}
969 	return ZFCP_ERP_FAILED;
970 }
971 
972 static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act)
973 {
974 	if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA))
975 		return zfcp_erp_port_strategy_open_nameserver(act);
976 	return zfcp_erp_port_strategy_open_common(act);
977 }
2393 978
2394static int 979static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
2395zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *erp_action)
2396{ 980{
2397 int retval;
2398 struct zfcp_port *port = erp_action->port; 981 struct zfcp_port *port = erp_action->port;
2399 982
2400 switch (erp_action->step) { 983 switch (erp_action->step) {
2401
2402 case ZFCP_ERP_STEP_UNINITIALIZED: 984 case ZFCP_ERP_STEP_UNINITIALIZED:
2403 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: 985 zfcp_erp_port_strategy_clearstati(port);
2404 case ZFCP_ERP_STEP_PORT_CLOSING: 986 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
2405 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> trying open\n", 987 return zfcp_erp_port_strategy_close(erp_action);
2406 port->wwpn, port->d_id);
2407 retval = zfcp_erp_port_strategy_open_port(erp_action);
2408 break; 988 break;
2409 989
2410 case ZFCP_ERP_STEP_PORT_OPENING: 990 case ZFCP_ERP_STEP_PORT_CLOSING:
2411 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) { 991 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
2412 ZFCP_LOG_DEBUG("WKA port is open\n"); 992 return ZFCP_ERP_FAILED;
2413 retval = ZFCP_ERP_SUCCEEDED;
2414 } else {
2415 ZFCP_LOG_DEBUG("open failed for WKA port\n");
2416 retval = ZFCP_ERP_FAILED;
2417 }
2418 		/* this is needed anyway (don't care about the retval of wakeup) */
2419 ZFCP_LOG_DEBUG("continue other open port operations\n");
2420 zfcp_erp_port_strategy_open_nameserver_wakeup(erp_action);
2421 break; 993 break;
2422
2423 default:
2424 ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
2425 erp_action->step);
2426 retval = ZFCP_ERP_FAILED;
2427 } 994 }
995 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
996 return ZFCP_ERP_EXIT;
997 else
998 return zfcp_erp_port_strategy_open(erp_action);
2428 999
2429 return retval; 1000 return ZFCP_ERP_FAILED;
2430}
2431
2432/*
2433 * function:
2434 *
2435 * purpose: makes the erp thread continue with reopen (physical) port
2436 * actions which have been paused until the name server port
2437 * is opened (or failed)
2438 *
2439 * returns:	0	(a kind of void retval, it's not used)
2440 */
2441static int
2442zfcp_erp_port_strategy_open_nameserver_wakeup(struct zfcp_erp_action
2443 *ns_erp_action)
2444{
2445 int retval = 0;
2446 unsigned long flags;
2447 struct zfcp_adapter *adapter = ns_erp_action->adapter;
2448 struct zfcp_erp_action *erp_action, *tmp;
2449
2450 read_lock_irqsave(&adapter->erp_lock, flags);
2451 list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head,
2452 list) {
2453 if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
2454 if (atomic_test_mask(
2455 ZFCP_STATUS_COMMON_ERP_FAILED,
2456 &adapter->nameserver_port->status))
2457 zfcp_erp_port_failed(erp_action->port, 27,
2458 NULL);
2459 zfcp_erp_action_ready(erp_action);
2460 }
2461 }
2462 read_unlock_irqrestore(&adapter->erp_lock, flags);
2463
2464 return retval;
2465}
2466
2467/*
2468 * function:
2469 *
2470 * purpose:
2471 *
2472 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2473 * ZFCP_ERP_FAILED - action finished unsuccessfully
2474 */
2475static int
2476zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
2477{
2478 int retval;
2479
2480 retval = zfcp_fsf_close_physical_port(erp_action);
2481 if (retval == -ENOMEM) {
2482 retval = ZFCP_ERP_NOMEM;
2483 goto out;
2484 }
2485 erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
2486 if (retval != 0) {
2487 /* could not send 'open', fail */
2488 retval = ZFCP_ERP_FAILED;
2489 goto out;
2490 }
2491 retval = ZFCP_ERP_CONTINUES;
2492 out:
2493 return retval;
2494} 1001}
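
    The forced-close helper above, like the plain close/open helpers further
    down, reduces to a single pattern: issue the FSF request, advance the ERP
    step, then map the return code. A minimal sketch of that mapping, with
    hypothetical names (erp_issue_and_step, the 'issue' callback) standing in
    for the real zfcp symbols:

        #include <errno.h>

        enum erp_rc { ERP_CONTINUES, ERP_FAILED, ERP_NOMEM };

        /* 'issue' stands in for zfcp_fsf_close_port() and friends,
         * 'step' for erp_action->step. */
        static enum erp_rc erp_issue_and_step(int (*issue)(void *), void *req,
                                              int *step, int next_step)
        {
                int rc = issue(req);

                if (rc == -ENOMEM)      /* low memory: retry the action later */
                        return ERP_NOMEM;
                *step = next_step;      /* the step advances even on failure */
                if (rc)                 /* request could not be sent */
                        return ERP_FAILED;
                return ERP_CONTINUES;   /* completion arrives asynchronously */
        }
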
2495 1002
2496static int 1003static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
2497zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
2498{ 1004{
2499 int retval = 0;
2500
2501 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | 1005 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
2502 ZFCP_STATUS_COMMON_CLOSING | 1006 ZFCP_STATUS_COMMON_CLOSING |
2503 ZFCP_STATUS_COMMON_ACCESS_DENIED | 1007 ZFCP_STATUS_COMMON_ACCESS_DENIED |
2504 ZFCP_STATUS_PORT_DID_DID | 1008 ZFCP_STATUS_UNIT_SHARED |
2505 ZFCP_STATUS_PORT_PHYS_CLOSING | 1009 ZFCP_STATUS_UNIT_READONLY,
2506 ZFCP_STATUS_PORT_INVALID_WWPN, 1010 &unit->status);
2507 &port->status);
2508 return retval;
2509}
2510
2511/*
2512 * function:
2513 *
2514 * purpose:
2515 *
2516 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2517 * ZFCP_ERP_FAILED - action finished unsuccessfully
2518 */
2519static int
2520zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
2521{
2522 int retval;
2523
2524 retval = zfcp_fsf_close_port(erp_action);
2525 if (retval == -ENOMEM) {
2526 retval = ZFCP_ERP_NOMEM;
2527 goto out;
2528 }
2529 erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
2530 if (retval != 0) {
2531 /* could not send 'close', fail */
2532 retval = ZFCP_ERP_FAILED;
2533 goto out;
2534 }
2535 retval = ZFCP_ERP_CONTINUES;
2536 out:
2537 return retval;
2538} 1011}
2539 1012
2540/* 1013static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
2541 * function:
2542 *
2543 * purpose:
2544 *
2545 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2546 * ZFCP_ERP_FAILED - action finished unsuccessfully
2547 */
2548static int
2549zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
2550{ 1014{
2551 int retval; 1015 int retval = zfcp_fsf_close_unit(erp_action);
2552 1016 if (retval == -ENOMEM)
2553 retval = zfcp_fsf_open_port(erp_action); 1017 return ZFCP_ERP_NOMEM;
2554 if (retval == -ENOMEM) { 1018 erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
2555 retval = ZFCP_ERP_NOMEM; 1019 if (retval)
2556 goto out; 1020 return ZFCP_ERP_FAILED;
2557 } 1021 return ZFCP_ERP_CONTINUES;
2558 erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
2559 if (retval != 0) {
2560 /* could not send 'open', fail */
2561 retval = ZFCP_ERP_FAILED;
2562 goto out;
2563 }
2564 retval = ZFCP_ERP_CONTINUES;
2565 out:
2566 return retval;
2567} 1022}
2568 1023
2569/* 1024static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2570 * function:
2571 *
2572 * purpose:
2573 *
2574 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2575 * ZFCP_ERP_FAILED - action finished unsuccessfully
2576 */
2577static int
2578zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
2579{ 1025{
2580 int retval; 1026 int retval = zfcp_fsf_open_unit(erp_action);
2581 1027 if (retval == -ENOMEM)
2582 retval = zfcp_ns_gid_pn_request(erp_action); 1028 return ZFCP_ERP_NOMEM;
2583 if (retval == -ENOMEM) { 1029 erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
2584 retval = ZFCP_ERP_NOMEM; 1030 if (retval)
2585 goto out; 1031 return ZFCP_ERP_FAILED;
2586 } 1032 return ZFCP_ERP_CONTINUES;
2587 erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
2588 if (retval != 0) {
2589 /* could not send nameserver request, fail */
2590 retval = ZFCP_ERP_FAILED;
2591 goto out;
2592 }
2593 retval = ZFCP_ERP_CONTINUES;
2594 out:
2595 return retval;
2596} 1033}
2597 1034
2598/* 1035static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
2599 * function:
2600 *
2601 * purpose: this routine executes the 'Reopen Unit' action
2602 * currently no retries
2603 *
2604 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2605 * ZFCP_ERP_SUCCEEDED - action finished successfully
2606 * ZFCP_ERP_FAILED - action finished unsuccessfully
2607 */
2608static int
2609zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
2610{ 1036{
2611 int retval = ZFCP_ERP_FAILED;
2612 struct zfcp_unit *unit = erp_action->unit; 1037 struct zfcp_unit *unit = erp_action->unit;
2613 1038
2614 switch (erp_action->step) { 1039 switch (erp_action->step) {
2615
2616 /*
2617 * FIXME:
2618	 * the ULP spec. begs for waiting for outstanding commands
2619 */
2620 case ZFCP_ERP_STEP_UNINITIALIZED: 1040 case ZFCP_ERP_STEP_UNINITIALIZED:
2621 zfcp_erp_unit_strategy_clearstati(unit); 1041 zfcp_erp_unit_strategy_clearstati(unit);
2622 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { 1042 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
2623 ZFCP_LOG_DEBUG("unit 0x%016Lx is open -> " 1043 return zfcp_erp_unit_strategy_close(erp_action);
2624 "trying close\n", unit->fcp_lun); 1044 /* already closed, fall through */
2625 retval = zfcp_erp_unit_strategy_close(erp_action);
2626 break;
2627 }
2628 /* else it's already closed, fall through */
2629 case ZFCP_ERP_STEP_UNIT_CLOSING: 1045 case ZFCP_ERP_STEP_UNIT_CLOSING:
2630 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { 1046 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
2631 ZFCP_LOG_DEBUG("close failed for unit 0x%016Lx\n", 1047 return ZFCP_ERP_FAILED;
2632 unit->fcp_lun); 1048 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
2633 retval = ZFCP_ERP_FAILED; 1049 return ZFCP_ERP_EXIT;
2634 } else { 1050 return zfcp_erp_unit_strategy_open(erp_action);
2635 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
2636 retval = ZFCP_ERP_EXIT;
2637 else {
2638 ZFCP_LOG_DEBUG("unit 0x%016Lx is not open -> "
2639 "trying open\n", unit->fcp_lun);
2640 retval =
2641 zfcp_erp_unit_strategy_open(erp_action);
2642 }
2643 }
2644 break;
2645 1051
2646 case ZFCP_ERP_STEP_UNIT_OPENING: 1052 case ZFCP_ERP_STEP_UNIT_OPENING:
2647 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { 1053 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
2648 ZFCP_LOG_DEBUG("unit 0x%016Lx is open\n", 1054 return ZFCP_ERP_SUCCEEDED;
2649 unit->fcp_lun);
2650 retval = ZFCP_ERP_SUCCEEDED;
2651 } else {
2652 ZFCP_LOG_DEBUG("open failed for unit 0x%016Lx\n",
2653 unit->fcp_lun);
2654 retval = ZFCP_ERP_FAILED;
2655 }
2656 break;
2657 } 1055 }
2658 1056 return ZFCP_ERP_FAILED;
2659 return retval;
2660} 1057}
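
    Both versions of zfcp_erp_unit_strategy are re-entered once per completed
    step: each call inspects erp_action->step, kicks off at most one
    asynchronous operation, and the fall-throughs let an already-satisfied
    step proceed immediately. A self-contained model of that resumable state
    machine, with all names invented for illustration:

        enum step { STEP_UNINIT, STEP_CLOSING, STEP_OPENING };
        enum rc { RC_CONTINUES, RC_SUCCEEDED, RC_FAILED, RC_EXIT };

        struct action { enum step step; int open; int close_only; };

        static enum rc unit_strategy(struct action *a)
        {
                switch (a->step) {
                case STEP_UNINIT:
                        if (a->open) {          /* still open: close it first */
                                a->step = STEP_CLOSING;
                                return RC_CONTINUES;
                        }
                        /* already closed, fall through */
                case STEP_CLOSING:
                        if (a->open)            /* close did not stick */
                                return RC_FAILED;
                        if (a->close_only)
                                return RC_EXIT;
                        a->step = STEP_OPENING; /* open request issued */
                        return RC_CONTINUES;
                case STEP_OPENING:
                        return a->open ? RC_SUCCEEDED : RC_FAILED;
                }
                return RC_FAILED;
        }
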
2661 1058
2662static int 1059static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
2663zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
2664{ 1060{
2665 int retval = 0; 1061 switch (result) {
2666 1062 case ZFCP_ERP_SUCCEEDED :
2667 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | 1063 atomic_set(&unit->erp_counter, 0);
2668 ZFCP_STATUS_COMMON_CLOSING | 1064 zfcp_erp_unit_unblock(unit);
2669 ZFCP_STATUS_COMMON_ACCESS_DENIED | 1065 break;
2670 ZFCP_STATUS_UNIT_SHARED | 1066 case ZFCP_ERP_FAILED :
2671 ZFCP_STATUS_UNIT_READONLY, 1067 atomic_inc(&unit->erp_counter);
2672 &unit->status); 1068 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
1069 zfcp_erp_unit_failed(unit, 21, NULL);
1070 break;
1071 }
2673 1072
2674 return retval; 1073 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
1074 zfcp_erp_unit_block(unit, 0);
1075 result = ZFCP_ERP_EXIT;
1076 }
1077 return result;
2675} 1078}
2676 1079
2677/* 1080static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
2678 * function:
2679 *
2680 * purpose:
2681 *
2682 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2683 * ZFCP_ERP_FAILED - action finished unsuccessfully
2684 */
2685static int
2686zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
2687{ 1081{
2688 int retval; 1082 switch (result) {
1083 case ZFCP_ERP_SUCCEEDED :
1084 atomic_set(&port->erp_counter, 0);
1085 zfcp_erp_port_unblock(port);
1086 break;
2689 1087
2690 retval = zfcp_fsf_close_unit(erp_action); 1088 case ZFCP_ERP_FAILED :
2691 if (retval == -ENOMEM) { 1089 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) {
2692 retval = ZFCP_ERP_NOMEM; 1090 zfcp_erp_port_block(port, 0);
2693 goto out; 1091 result = ZFCP_ERP_EXIT;
2694 } 1092 }
2695 erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; 1093 atomic_inc(&port->erp_counter);
2696 if (retval != 0) { 1094 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
2697 /* could not send 'close', fail */ 1095 zfcp_erp_port_failed(port, 22, NULL);
2698 retval = ZFCP_ERP_FAILED; 1096 break;
2699 goto out;
2700 } 1097 }
2701 retval = ZFCP_ERP_CONTINUES;
2702 1098
2703 out: 1099 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
2704 return retval; 1100 zfcp_erp_port_block(port, 0);
1101 result = ZFCP_ERP_EXIT;
1102 }
1103 return result;
2705} 1104}
2706 1105
2707/* 1106static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
2708 * function: 1107 int result)
2709 *
2710 * purpose:
2711 *
2712 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2713 * ZFCP_ERP_FAILED - action finished unsuccessfully
2714 */
2715static int
2716zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2717{ 1108{
2718 int retval; 1109 switch (result) {
1110 case ZFCP_ERP_SUCCEEDED :
1111 atomic_set(&adapter->erp_counter, 0);
1112 zfcp_erp_adapter_unblock(adapter);
1113 break;
2719 1114
2720 retval = zfcp_fsf_open_unit(erp_action); 1115 case ZFCP_ERP_FAILED :
2721 if (retval == -ENOMEM) { 1116 atomic_inc(&adapter->erp_counter);
2722 retval = ZFCP_ERP_NOMEM; 1117 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
2723 goto out; 1118 zfcp_erp_adapter_failed(adapter, 23, NULL);
2724 } 1119 break;
2725 erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
2726 if (retval != 0) {
2727 /* could not send 'open', fail */
2728 retval = ZFCP_ERP_FAILED;
2729 goto out;
2730 } 1120 }
2731 retval = ZFCP_ERP_CONTINUES;
2732 out:
2733 return retval;
2734}
2735 1121
2736void zfcp_erp_start_timer(struct zfcp_fsf_req *fsf_req) 1122 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
2737{ 1123 zfcp_erp_adapter_block(adapter, 0);
2738 BUG_ON(!fsf_req->erp_action); 1124 result = ZFCP_ERP_EXIT;
2739 fsf_req->timer.function = zfcp_erp_timeout_handler; 1125 }
2740 fsf_req->timer.data = (unsigned long) fsf_req->erp_action; 1126 return result;
2741 fsf_req->timer.expires = jiffies + ZFCP_ERP_FSFREQ_TIMEOUT;
2742 add_timer(&fsf_req->timer);
2743} 1127}
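
    zfcp_erp_start_timer uses the timer API of this kernel generation, where
    the callback argument travels in timer.data as an unsigned long. A hedged
    sketch of the idiom (kernel context assumed; my_req, my_timeout and the
    delay are invented, and the timer is assumed to have been init_timer()'d
    at request setup, as zfcp does):

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        struct my_req {
                struct timer_list timer;
                /* ... request state ... */
        };

        static void my_timeout(unsigned long data)
        {
                struct my_req *req = (struct my_req *) data;
                /* mark *req as timed out, wake whoever waits on it */
        }

        static void my_start_timer(struct my_req *req, unsigned long delay)
        {
                req->timer.function = my_timeout;
                req->timer.data = (unsigned long) req;
                req->timer.expires = jiffies + delay;
                add_timer(&req->timer); /* pair with del_timer() on completion */
        }
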
2744 1128
2745/* 1129static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
2746 * function: 1130 int result)
2747 *
2748 * purpose: enqueue the specified error recovery action, if needed
2749 *
2750 * returns:
2751 */
2752static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
2753 struct zfcp_port *port,
2754 struct zfcp_unit *unit, u8 id, void *ref)
2755{ 1131{
2756 int retval = 1, need = want; 1132 struct zfcp_adapter *adapter = erp_action->adapter;
2757 struct zfcp_erp_action *erp_action = NULL; 1133 struct zfcp_port *port = erp_action->port;
2758 u32 status = 0; 1134 struct zfcp_unit *unit = erp_action->unit;
2759 1135
2760 /* 1136 switch (erp_action->action) {
2761 * We need some rules here which check whether we really need
2762 * this action or whether we should just drop it.
2763	 * E.g. if there is an unfinished 'Reopen Port' request then we drop a
2764 * 'Reopen Unit' request for an associated unit since we can't
2765 * satisfy this request now. A 'Reopen Port' action will trigger
2766 * 'Reopen Unit' actions when it completes.
2767 * Thus, there are only actions in the queue which can immediately be
2768 * executed. This makes the processing of the action queue more
2769 * efficient.
2770 */
2771
2772 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
2773 &adapter->status))
2774 return -EIO;
2775 1137
2776 /* check whether we really need this */
2777 switch (want) {
2778 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1138 case ZFCP_ERP_ACTION_REOPEN_UNIT:
2779 if (atomic_test_mask 1139 result = zfcp_erp_strategy_check_unit(unit, result);
2780 (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) { 1140 break;
2781 goto out;
2782 }
2783 if (!atomic_test_mask
2784 (ZFCP_STATUS_COMMON_RUNNING, &port->status) ||
2785 atomic_test_mask
2786 (ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
2787 goto out;
2788 }
2789 if (!atomic_test_mask
2790 (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
2791 need = ZFCP_ERP_ACTION_REOPEN_PORT;
2792 /* fall through !!! */
2793
2794 case ZFCP_ERP_ACTION_REOPEN_PORT:
2795 if (atomic_test_mask
2796 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) {
2797 goto out;
2798 }
2799 /* fall through !!! */
2800 1141
2801 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1142 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2802 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1143 case ZFCP_ERP_ACTION_REOPEN_PORT:
2803 &port->status)) { 1144 result = zfcp_erp_strategy_check_port(port, result);
2804 if (port->erp_action.action != 1145 break;
2805 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
2806 ZFCP_LOG_INFO("dropped erp action %i (port "
2807 "0x%016Lx, action in use: %i)\n",
2808 want, port->wwpn,
2809 port->erp_action.action);
2810 }
2811 goto out;
2812 }
2813 if (!atomic_test_mask
2814 (ZFCP_STATUS_COMMON_RUNNING, &adapter->status) ||
2815 atomic_test_mask
2816 (ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
2817 goto out;
2818 }
2819 if (!atomic_test_mask
2820 (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
2821 need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
2822 /* fall through !!! */
2823 1146
2824 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1147 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
2825 if (atomic_test_mask 1148 result = zfcp_erp_strategy_check_adapter(adapter, result);
2826 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) {
2827 goto out;
2828 }
2829 break; 1149 break;
2830
2831 default:
2832 ZFCP_LOG_NORMAL("bug: unknown erp action requested "
2833 "on adapter %s (action=%d)\n",
2834 zfcp_get_busid_by_adapter(adapter), want);
2835 goto out;
2836 } 1150 }
1151 return result;
1152}
2837 1153
2838 /* check whether we need something stronger first */ 1154static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
2839 if (need) { 1155{
2840 ZFCP_LOG_DEBUG("stronger erp action %d needed before " 1156 int status = atomic_read(target_status);
2841 "erp action %d on adapter %s\n",
2842 need, want, zfcp_get_busid_by_adapter(adapter));
2843 }
2844 1157
2845 /* mark adapter to have some error recovery pending */ 1158 if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
2846 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 1159 (erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
1160 return 1; /* take it online */
2847 1161
2848 /* setup error recovery action */ 1162 if (!(status & ZFCP_STATUS_COMMON_RUNNING) &&
2849 switch (need) { 1163 !(erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
1164 return 1; /* take it offline */
2850 1165
2851 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1166 return 0;
2852 zfcp_unit_get(unit); 1167}
2853 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); 1168
2854 erp_action = &unit->erp_action; 1169static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
2855 if (!atomic_test_mask 1170{
2856 (ZFCP_STATUS_COMMON_RUNNING, &unit->status)) 1171 int action = act->action;
2857 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 1172 struct zfcp_adapter *adapter = act->adapter;
1173 struct zfcp_port *port = act->port;
1174 struct zfcp_unit *unit = act->unit;
1175 u32 erp_status = act->status;
1176
1177 switch (action) {
1178 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1179 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
1180 _zfcp_erp_adapter_reopen(adapter,
1181 ZFCP_STATUS_COMMON_ERP_FAILED,
1182 67, NULL);
1183 return ZFCP_ERP_EXIT;
1184 }
2858 break; 1185 break;
2859 1186
2860 case ZFCP_ERP_ACTION_REOPEN_PORT:
2861 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1187 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2862 zfcp_port_get(port); 1188 case ZFCP_ERP_ACTION_REOPEN_PORT:
2863 zfcp_erp_action_dismiss_port(port); 1189 if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
2864 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 1190 _zfcp_erp_port_reopen(port,
2865 erp_action = &port->erp_action; 1191 ZFCP_STATUS_COMMON_ERP_FAILED,
2866 if (!atomic_test_mask 1192 68, NULL);
2867 (ZFCP_STATUS_COMMON_RUNNING, &port->status)) 1193 return ZFCP_ERP_EXIT;
2868 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 1194 }
2869 break; 1195 break;
2870 1196
2871 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1197 case ZFCP_ERP_ACTION_REOPEN_UNIT:
2872 zfcp_adapter_get(adapter); 1198 if (zfcp_erp_strat_change_det(&unit->status, erp_status)) {
2873 zfcp_erp_action_dismiss_adapter(adapter); 1199 _zfcp_erp_unit_reopen(unit,
2874 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 1200 ZFCP_STATUS_COMMON_ERP_FAILED,
2875 erp_action = &adapter->erp_action; 1201 69, NULL);
2876 if (!atomic_test_mask 1202 return ZFCP_ERP_EXIT;
2877 (ZFCP_STATUS_COMMON_RUNNING, &adapter->status)) 1203 }
2878 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
2879 break; 1204 break;
2880 } 1205 }
2881 1206 return ret;
2882 memset(erp_action, 0, sizeof (struct zfcp_erp_action));
2883 erp_action->adapter = adapter;
2884 erp_action->port = port;
2885 erp_action->unit = unit;
2886 erp_action->action = need;
2887 erp_action->status = status;
2888
2889 ++adapter->erp_total_count;
2890
2891 /* finally put it into 'ready' queue and kick erp thread */
2892 list_add_tail(&erp_action->list, &adapter->erp_ready_head);
2893 up(&adapter->erp_ready_sem);
2894 zfcp_rec_dbf_event_thread(1, adapter, 0);
2895 retval = 0;
2896 out:
2897 zfcp_rec_dbf_event_trigger(id, ref, want, need, erp_action,
2898 adapter, port, unit);
2899 return retval;
2900} 1207}
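
    The enqueue path above encodes an escalation rule: a requested action is
    silently promoted to its parent's reopen while the parent is blocked, so
    the ready queue only ever holds actions that can run immediately. A
    compact model of just that promotion, with invented flag parameters
    standing in for the UNBLOCKED status bits tested on the parent objects:

        enum want { REOPEN_UNIT, REOPEN_PORT, REOPEN_ADAPTER };

        static enum want escalate(enum want want, int port_unblocked,
                                  int adapter_unblocked)
        {
                enum want need = want;

                if (need == REOPEN_UNIT && !port_unblocked)
                        need = REOPEN_PORT;     /* unit needs a usable port */
                if (need == REOPEN_PORT && !adapter_unblocked)
                        need = REOPEN_ADAPTER;  /* port needs a usable adapter */
                return need;
        }
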
2901 1208
2902static int 1209static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
2903zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
2904{ 1210{
2905 int retval = 0;
2906 struct zfcp_adapter *adapter = erp_action->adapter; 1211 struct zfcp_adapter *adapter = erp_action->adapter;
2907 1212
2908 --adapter->erp_total_count; 1213 adapter->erp_total_count--;
2909 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { 1214 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
2910 --adapter->erp_low_mem_count; 1215 adapter->erp_low_mem_count--;
2911 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; 1216 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
2912 } 1217 }
2913 1218
@@ -2919,141 +1224,458 @@ zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
2919 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1224 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
2920 &erp_action->unit->status); 1225 &erp_action->unit->status);
2921 break; 1226 break;
1227
2922 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1228 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2923 case ZFCP_ERP_ACTION_REOPEN_PORT: 1229 case ZFCP_ERP_ACTION_REOPEN_PORT:
2924 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1230 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
2925 &erp_action->port->status); 1231 &erp_action->port->status);
2926 break; 1232 break;
1233
2927 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1234 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
2928 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1235 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
2929 &erp_action->adapter->status); 1236 &erp_action->adapter->status);
2930 break; 1237 break;
2931 default:
2932 /* bug */
2933 break;
2934 } 1238 }
2935 return retval;
2936} 1239}
2937 1240
2938/** 1241struct zfcp_erp_add_work {
2939 * zfcp_erp_action_cleanup 1242 struct zfcp_unit *unit;
2940 * 1243 struct work_struct work;
2941 * Register unit with scsi stack if appropriate and fix reference counts. 1244};
2942 * Note: Temporary units are not registered with scsi stack. 1245
2943 */ 1246static void zfcp_erp_scsi_scan(struct work_struct *work)
2944static void
2945zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
2946 struct zfcp_port *port, struct zfcp_unit *unit,
2947 int result)
2948{ 1247{
2949 switch (action) { 1248 struct zfcp_erp_add_work *p =
1249 container_of(work, struct zfcp_erp_add_work, work);
1250 struct zfcp_unit *unit = p->unit;
1251 struct fc_rport *rport = unit->port->rport;
1252 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1253 unit->scsi_lun, 0);
1254 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1255 zfcp_unit_put(unit);
1256 kfree(p);
1257}
1258
1259static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1260{
1261 struct zfcp_erp_add_work *p;
1262
1263 p = kzalloc(sizeof(*p), GFP_KERNEL);
1264 if (!p) {
1265 dev_err(&unit->port->adapter->ccw_device->dev,
1266 "Out of resources. Could not register unit "
1267 "0x%016Lx on port 0x%016Lx with SCSI stack.\n",
1268 unit->fcp_lun, unit->port->wwpn);
1269 return;
1270 }
1271
1272 zfcp_unit_get(unit);
1273 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1274 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1275 p->unit = unit;
1276 schedule_work(&p->work);
1277}
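
    zfcp_erp_schedule_work defers the SCSI target scan to process context: a
    small carrier struct owns a reference on the unit, and the work handler
    drops both the reference and the carrier. A sketch of the same pattern
    with generic names (my_unit, unit_get/unit_put are stand-ins for the zfcp
    structures and reference helpers):

        #include <linux/workqueue.h>
        #include <linux/slab.h>

        struct my_unit;                         /* stand-in for struct zfcp_unit */
        extern void unit_get(struct my_unit *);
        extern void unit_put(struct my_unit *);

        struct scan_work {
                struct my_unit *unit;
                struct work_struct work;
        };

        static void scan_fn(struct work_struct *work)
        {
                struct scan_work *w = container_of(work, struct scan_work, work);

                /* sleepable context: safe to call scsi_scan_target() here */
                unit_put(w->unit);      /* drop the ref taken at schedule time */
                kfree(w);
        }

        static void schedule_scan(struct my_unit *unit)
        {
                struct scan_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

                if (!w)
                        return;         /* unit stays unregistered, as above */
                unit_get(unit);         /* keep unit alive until scan_fn runs */
                w->unit = unit;
                INIT_WORK(&w->work, scan_fn);
                schedule_work(&w->work);
        }
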
1278
1279static void zfcp_erp_rport_register(struct zfcp_port *port)
1280{
1281 struct fc_rport_identifiers ids;
1282 ids.node_name = port->wwnn;
1283 ids.port_name = port->wwpn;
1284 ids.port_id = port->d_id;
1285 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
1286 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
1287 if (!port->rport) {
1288 dev_err(&port->adapter->ccw_device->dev,
1289 "Failed registration of rport "
1290 "0x%016Lx.\n", port->wwpn);
1291 return;
1292 }
1293
1294 scsi_target_unblock(&port->rport->dev);
1295 port->rport->maxframe_size = port->maxframe_size;
1296 port->rport->supported_classes = port->supported_classes;
1297}
1298
1299static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
1300{
1301 struct zfcp_port *port;
1302 list_for_each_entry(port, &adapter->port_list_head, list)
1303 if (port->rport && !(atomic_read(&port->status) &
1304 ZFCP_STATUS_PORT_WKA)) {
1305 fc_remote_port_delete(port->rport);
1306 port->rport = NULL;
1307 }
1308}
1309
1310static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1311{
1312 struct zfcp_adapter *adapter = act->adapter;
1313 struct zfcp_port *port = act->port;
1314 struct zfcp_unit *unit = act->unit;
1315
1316 switch (act->action) {
2950 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1317 case ZFCP_ERP_ACTION_REOPEN_UNIT:
2951 if ((result == ZFCP_ERP_SUCCEEDED) 1318 if ((result == ZFCP_ERP_SUCCEEDED) &&
2952 && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY, 1319 !unit->device && port->rport) {
2953 &unit->status))
2954 && !unit->device
2955 && port->rport) {
2956 atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED, 1320 atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
2957 &unit->status); 1321 &unit->status);
2958 if (atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, 1322 if (!(atomic_read(&unit->status) &
2959 &unit->status) == 0) 1323 ZFCP_STATUS_UNIT_SCSI_WORK_PENDING))
2960 zfcp_erp_schedule_work(unit); 1324 zfcp_erp_schedule_work(unit);
2961 } 1325 }
2962 zfcp_unit_put(unit); 1326 zfcp_unit_put(unit);
2963 break; 1327 break;
1328
2964 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1329 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2965 case ZFCP_ERP_ACTION_REOPEN_PORT: 1330 case ZFCP_ERP_ACTION_REOPEN_PORT:
2966 if (atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, 1331 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN) {
2967 &port->status)) {
2968 zfcp_port_put(port); 1332 zfcp_port_put(port);
2969 break; 1333 return;
2970 }
2971
2972 if ((result == ZFCP_ERP_SUCCEEDED)
2973 && !port->rport) {
2974 struct fc_rport_identifiers ids;
2975 ids.node_name = port->wwnn;
2976 ids.port_name = port->wwpn;
2977 ids.port_id = port->d_id;
2978 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
2979 port->rport =
2980 fc_remote_port_add(adapter->scsi_host, 0, &ids);
2981 if (!port->rport)
2982 ZFCP_LOG_NORMAL("failed registration of rport"
2983 "(adapter %s, wwpn=0x%016Lx)\n",
2984 zfcp_get_busid_by_port(port),
2985 port->wwpn);
2986 else {
2987 scsi_target_unblock(&port->rport->dev);
2988 port->rport->maxframe_size = port->maxframe_size;
2989 port->rport->supported_classes =
2990 port->supported_classes;
2991 }
2992 } 1334 }
1335 if ((result == ZFCP_ERP_SUCCEEDED) && !port->rport)
1336 zfcp_erp_rport_register(port);
2993 if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) { 1337 if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) {
2994 fc_remote_port_delete(port->rport); 1338 fc_remote_port_delete(port->rport);
2995 port->rport = NULL; 1339 port->rport = NULL;
2996 } 1340 }
2997 zfcp_port_put(port); 1341 zfcp_port_put(port);
2998 break; 1342 break;
1343
2999 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1344 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3000 if (result != ZFCP_ERP_SUCCEEDED) { 1345 if (result != ZFCP_ERP_SUCCEEDED)
3001 list_for_each_entry(port, &adapter->port_list_head, list) 1346 zfcp_erp_rports_del(adapter);
3002 if (port->rport &&
3003 !atomic_test_mask(ZFCP_STATUS_PORT_WKA,
3004 &port->status)) {
3005 fc_remote_port_delete(port->rport);
3006 port->rport = NULL;
3007 }
3008 }
3009 zfcp_adapter_put(adapter); 1347 zfcp_adapter_put(adapter);
3010 break; 1348 break;
3011 default:
3012 break;
3013 } 1349 }
3014} 1350}
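
    Each case in zfcp_erp_action_cleanup drops the reference that
    zfcp_erp_action_enqueue took when the action was queued, so the target
    object cannot vanish while its action is in flight. The pairing in sketch
    form, with hypothetical names (my_obj, obj_get/obj_put, erp_ready_head):

        #include <linux/list.h>

        struct my_obj;                          /* unit, port or adapter */
        struct my_action { struct list_head list; struct my_obj *obj; };

        extern struct list_head erp_ready_head; /* invented queue head */
        extern void obj_get(struct my_obj *);
        extern void obj_put(struct my_obj *);

        static void enqueue_action(struct my_action *act, struct my_obj *obj)
        {
                obj_get(obj);                   /* pin: taken at enqueue ... */
                act->obj = obj;
                list_add_tail(&act->list, &erp_ready_head);
        }

        static void cleanup_action(struct my_action *act)
        {
                /* result-dependent work (rport/SCSI registration) goes here */
                obj_put(act->obj);              /* ... and dropped at cleanup */
        }
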
3015 1351
1352static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1353{
1354 switch (erp_action->action) {
1355 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1356 return zfcp_erp_adapter_strategy(erp_action);
1357 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1358 return zfcp_erp_port_forced_strategy(erp_action);
1359 case ZFCP_ERP_ACTION_REOPEN_PORT:
1360 return zfcp_erp_port_strategy(erp_action);
1361 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1362 return zfcp_erp_unit_strategy(erp_action);
1363 }
1364 return ZFCP_ERP_FAILED;
1365}
3016 1366
3017static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 1367static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
3018{ 1368{
3019 struct zfcp_port *port; 1369 int retval;
1370 struct zfcp_adapter *adapter = erp_action->adapter;
1371 unsigned long flags;
3020 1372
3021 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) 1373 read_lock_irqsave(&zfcp_data.config_lock, flags);
3022 zfcp_erp_action_dismiss(&adapter->erp_action); 1374 write_lock(&adapter->erp_lock);
3023 else 1375
3024 list_for_each_entry(port, &adapter->port_list_head, list) 1376 zfcp_erp_strategy_check_fsfreq(erp_action);
3025 zfcp_erp_action_dismiss_port(port); 1377
1378 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
1379 zfcp_erp_action_dequeue(erp_action);
1380 retval = ZFCP_ERP_DISMISSED;
1381 goto unlock;
1382 }
1383
1384 zfcp_erp_action_to_running(erp_action);
1385
1386 /* no lock to allow for blocking operations */
1387 write_unlock(&adapter->erp_lock);
1388 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1389 retval = zfcp_erp_strategy_do_action(erp_action);
1390 read_lock_irqsave(&zfcp_data.config_lock, flags);
1391 write_lock(&adapter->erp_lock);
1392
1393 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
1394 retval = ZFCP_ERP_CONTINUES;
1395
1396 switch (retval) {
1397 case ZFCP_ERP_NOMEM:
1398 if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
1399 ++adapter->erp_low_mem_count;
1400 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
1401 }
1402 if (adapter->erp_total_count == adapter->erp_low_mem_count)
1403 _zfcp_erp_adapter_reopen(adapter, 0, 66, NULL);
1404 else {
1405 zfcp_erp_strategy_memwait(erp_action);
1406 retval = ZFCP_ERP_CONTINUES;
1407 }
1408 goto unlock;
1409
1410 case ZFCP_ERP_CONTINUES:
1411 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
1412 --adapter->erp_low_mem_count;
1413 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
1414 }
1415 goto unlock;
1416 }
1417
1418 retval = zfcp_erp_strategy_check_target(erp_action, retval);
1419 zfcp_erp_action_dequeue(erp_action);
1420 retval = zfcp_erp_strategy_statechange(erp_action, retval);
1421 if (retval == ZFCP_ERP_EXIT)
1422 goto unlock;
1423 zfcp_erp_strategy_followup_actions(erp_action);
1424
1425 unlock:
1426 write_unlock(&adapter->erp_lock);
1427 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1428
1429 if (retval != ZFCP_ERP_CONTINUES)
1430 zfcp_erp_action_cleanup(erp_action, retval);
1431
1432 return retval;
3026} 1433}
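
    zfcp_erp_strategy releases erp_lock around the strategy step because that
    step may block, then retakes the lock and re-checks the DISMISSED flag,
    which another path may have set in the meantime. The skeleton of that
    unlock-around-blocking-call pattern, simplified to a plain spinlock and
    invented names (the real code uses an rwlock with irqsave):

        #include <linux/spinlock.h>

        enum { DISMISSED = 1, CONTINUES = 2 };

        struct act { int dismissed; };

        static int do_strategy_step(struct act *a);    /* may sleep */
        static void mark_running(struct act *a);

        static int run_one_action(struct act *a, spinlock_t *lock)
        {
                int rc;

                spin_lock(lock);
                if (a->dismissed) {
                        spin_unlock(lock);
                        return DISMISSED;
                }
                mark_running(a);
                spin_unlock(lock);              /* the step below may sleep */

                rc = do_strategy_step(a);

                spin_lock(lock);
                if (a->dismissed)               /* raced with a dismissal */
                        rc = CONTINUES;
                spin_unlock(lock);
                return rc;
        }
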
3027 1434
3028static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) 1435static int zfcp_erp_thread(void *data)
3029{ 1436{
3030 struct zfcp_unit *unit; 1437 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
1438 struct list_head *next;
1439 struct zfcp_erp_action *act;
1440 unsigned long flags;
3031 1441
3032 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) 1442 daemonize("zfcperp%s", adapter->ccw_device->dev.bus_id);
3033 zfcp_erp_action_dismiss(&port->erp_action); 1443 /* Block all signals */
1444 siginitsetinv(&current->blocked, 0);
1445 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1446 wake_up(&adapter->erp_thread_wqh);
1447
1448 while (!(atomic_read(&adapter->status) &
1449 ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {
1450 write_lock_irqsave(&adapter->erp_lock, flags);
1451 next = adapter->erp_ready_head.next;
1452 write_unlock_irqrestore(&adapter->erp_lock, flags);
1453
1454 if (next != &adapter->erp_ready_head) {
1455 act = list_entry(next, struct zfcp_erp_action, list);
1456
                                                      1457 			/* there is more to come after dismissal, no notify */
1458 if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
1459 zfcp_erp_wakeup(adapter);
1460 }
1461
1462 zfcp_rec_dbf_event_thread(4, adapter);
1463 down_interruptible(&adapter->erp_ready_sem);
1464 zfcp_rec_dbf_event_thread(5, adapter);
1465 }
1466
1467 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1468 wake_up(&adapter->erp_thread_wqh);
1469
1470 return 0;
1471}
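
    The thread above predates the kthread API: it is started with
    kernel_thread(), detaches itself via daemonize(), and is stopped through
    a KILL status bit plus a wait-queue handshake. For comparison only, the
    same lifecycle expressed with the kthread interface that later kernels
    use; process_ready_actions, wait_for_work and busid are invented helpers:

        #include <linux/kthread.h>

        struct my_adapter { struct task_struct *thread; /* ... */ };

        static void process_ready_actions(struct my_adapter *a);
        static void wait_for_work(struct my_adapter *a);  /* sleeps until kicked */

        static int erp_thread_fn(void *data)
        {
                struct my_adapter *adapter = data;

                while (!kthread_should_stop()) {
                        process_ready_actions(adapter);
                        wait_for_work(adapter);
                }
                return 0;
        }

        /* setup:    adapter->thread = kthread_run(erp_thread_fn, adapter,
         *                                         "zfcperp%s", busid);
         * teardown: kthread_stop(adapter->thread);
         */
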
1472
1473/**
1474 * zfcp_erp_thread_setup - Start ERP thread for adapter
1475 * @adapter: Adapter to start the ERP thread for
1476 *
1477 * Returns 0 on success or error code from kernel_thread()
1478 */
1479int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1480{
1481 int retval;
1482
1483 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1484 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
1485 if (retval < 0) {
1486 dev_err(&adapter->ccw_device->dev,
1487 "Creation of ERP thread failed.\n");
1488 return retval;
1489 }
1490 wait_event(adapter->erp_thread_wqh,
1491 atomic_read(&adapter->status) &
1492 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP);
1493 return 0;
1494}
1495
1496/**
1497 * zfcp_erp_thread_kill - Stop ERP thread.
1498 * @adapter: Adapter where the ERP thread should be stopped.
1499 *
1500 * The caller of this routine ensures that the specified adapter has
1501 * been shut down and that this operation has been completed. Thus,
1502 * there are no pending erp_actions which would need to be handled
1503 * here.
1504 */
1505void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1506{
1507 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
1508 up(&adapter->erp_ready_sem);
1509 zfcp_rec_dbf_event_thread_lock(2, adapter);
1510
1511 wait_event(adapter->erp_thread_wqh,
1512 !(atomic_read(&adapter->status) &
1513 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP));
1514
1515 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1516 &adapter->status);
1517}
1518
1519/**
1520 * zfcp_erp_adapter_failed - Set adapter status to failed.
1521 * @adapter: Failed adapter.
1522 * @id: Event id for debug trace.
1523 * @ref: Reference for debug trace.
1524 */
1525void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
1526{
1527 zfcp_erp_modify_adapter_status(adapter, id, ref,
1528 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1529 dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n");
1530}
1531
1532/**
1533 * zfcp_erp_port_failed - Set port status to failed.
1534 * @port: Failed port.
1535 * @id: Event id for debug trace.
1536 * @ref: Reference for debug trace.
1537 */
1538void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
1539{
1540 zfcp_erp_modify_port_status(port, id, ref,
1541 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1542
1543 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
1544 dev_err(&port->adapter->ccw_device->dev,
1545 "Port ERP failed for WKA port d_id=0x%06x.\n",
1546 port->d_id);
3034 else 1547 else
3035 list_for_each_entry(unit, &port->unit_list_head, list) 1548 dev_err(&port->adapter->ccw_device->dev,
3036 zfcp_erp_action_dismiss_unit(unit); 1549 "Port ERP failed for port wwpn=0x%016Lx.\n",
1550 port->wwpn);
3037} 1551}
3038 1552
3039static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) 1553/**
1554 * zfcp_erp_unit_failed - Set unit status to failed.
1555 * @unit: Failed unit.
1556 * @id: Event id for debug trace.
1557 * @ref: Reference for debug trace.
1558 */
1559void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
3040{ 1560{
3041 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) 1561 zfcp_erp_modify_unit_status(unit, id, ref,
3042 zfcp_erp_action_dismiss(&unit->erp_action); 1562 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1563
1564 dev_err(&unit->port->adapter->ccw_device->dev,
1565 "Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n",
1566 unit->fcp_lun, unit->port->wwpn);
3043} 1567}
3044 1568
3045static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) 1569/**
1570 * zfcp_erp_wait - wait for completion of error recovery on an adapter
1571 * @adapter: adapter for which to wait for completion of its error recovery
1572 */
1573void zfcp_erp_wait(struct zfcp_adapter *adapter)
3046{ 1574{
3047 list_move(&erp_action->list, &erp_action->adapter->erp_running_head); 1575 wait_event(adapter->erp_done_wqh,
3048 zfcp_rec_dbf_event_action(145, erp_action); 1576 !(atomic_read(&adapter->status) &
1577 ZFCP_STATUS_ADAPTER_ERP_PENDING));
1578}
1579
1580/**
1581 * zfcp_erp_modify_adapter_status - change adapter status bits
1582 * @adapter: adapter to change the status
1583 * @id: id for the debug trace
1584 * @ref: reference for the debug trace
1585 * @mask: status bits to change
1586 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1587 *
1588 * Changes in common status bits are propagated to attached ports and units.
1589 */
1590void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
1591 void *ref, u32 mask, int set_or_clear)
1592{
1593 struct zfcp_port *port;
1594 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1595
1596 if (set_or_clear == ZFCP_SET) {
1597 if (status_change_set(mask, &adapter->status))
1598 zfcp_rec_dbf_event_adapter(id, ref, adapter);
1599 atomic_set_mask(mask, &adapter->status);
1600 } else {
1601 if (status_change_clear(mask, &adapter->status))
1602 zfcp_rec_dbf_event_adapter(id, ref, adapter);
1603 atomic_clear_mask(mask, &adapter->status);
1604 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1605 atomic_set(&adapter->erp_counter, 0);
1606 }
1607
1608 if (common_mask)
1609 list_for_each_entry(port, &adapter->port_list_head, list)
1610 zfcp_erp_modify_port_status(port, id, ref, common_mask,
1611 set_or_clear);
3049} 1612}
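
    Only the bits in ZFCP_COMMON_FLAGS cascade down the object tree:
    zfcp_erp_modify_adapter_status forwards the common subset to every port,
    which in turn forwards it to every unit. A skeleton of that propagation,
    with the types, list layout and COMMON_FLAGS mask invented here:

        #include <linux/types.h>
        #include <linux/list.h>

        #define COMMON_FLAGS 0x0000ffff         /* invented mask */

        struct my_unit { u32 status; struct list_head list; };
        struct my_port { u32 status; struct list_head units, list; };
        struct my_adapter { u32 status; struct list_head ports; };

        static void modify_port_status(struct my_port *p, u32 mask, int set)
        {
                struct my_unit *u;
                u32 common = mask & COMMON_FLAGS;

                if (set)
                        p->status |= mask;
                else
                        p->status &= ~mask;
                if (common)                     /* cascade common bits only */
                        list_for_each_entry(u, &p->units, list) {
                                if (set)
                                        u->status |= common;
                                else
                                        u->status &= ~common;
                        }
        }

        static void modify_adapter_status(struct my_adapter *a, u32 mask, int set)
        {
                struct my_port *p;
                u32 common = mask & COMMON_FLAGS;

                if (set)
                        a->status |= mask;
                else
                        a->status &= ~mask;
                if (common)
                        list_for_each_entry(p, &a->ports, list)
                                modify_port_status(p, common, set);
        }
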
3050 1613
3051static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action) 1614/**
1615 * zfcp_erp_modify_port_status - change port status bits
1616 * @port: port to change the status bits
1617 * @id: id for the debug trace
1618 * @ref: reference for the debug trace
1619 * @mask: status bits to change
1620 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1621 *
1622 * Changes in common status bits are propagated to attached units.
1623 */
1624void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
1625 u32 mask, int set_or_clear)
3052{ 1626{
3053 list_move(&erp_action->list, &erp_action->adapter->erp_ready_head); 1627 struct zfcp_unit *unit;
3054 zfcp_rec_dbf_event_action(146, erp_action); 1628 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1629
1630 if (set_or_clear == ZFCP_SET) {
1631 if (status_change_set(mask, &port->status))
1632 zfcp_rec_dbf_event_port(id, ref, port);
1633 atomic_set_mask(mask, &port->status);
1634 } else {
1635 if (status_change_clear(mask, &port->status))
1636 zfcp_rec_dbf_event_port(id, ref, port);
1637 atomic_clear_mask(mask, &port->status);
1638 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1639 atomic_set(&port->erp_counter, 0);
1640 }
1641
1642 if (common_mask)
1643 list_for_each_entry(unit, &port->unit_list_head, list)
1644 zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
1645 set_or_clear);
3055} 1646}
3056 1647
1648/**
1649 * zfcp_erp_modify_unit_status - change unit status bits
1650 * @unit: unit to change the status bits
1651 * @id: id for the debug trace
1652 * @ref: reference for the debug trace
1653 * @mask: status bits to change
1654 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1655 */
1656void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
1657 u32 mask, int set_or_clear)
1658{
1659 if (set_or_clear == ZFCP_SET) {
1660 if (status_change_set(mask, &unit->status))
1661 zfcp_rec_dbf_event_unit(id, ref, unit);
1662 atomic_set_mask(mask, &unit->status);
1663 } else {
1664 if (status_change_clear(mask, &unit->status))
1665 zfcp_rec_dbf_event_unit(id, ref, unit);
1666 atomic_clear_mask(mask, &unit->status);
1667 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
1668 atomic_set(&unit->erp_counter, 0);
1669 }
1670 }
1671}
1672
1673/**
1674 * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP
1675 * @port: The "boxed" port.
1676 * @id: The debug trace id.
                                                      1677 * @ref: Reference for the debug trace.
1678 */
3057void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref) 1679void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
3058{ 1680{
3059 unsigned long flags; 1681 unsigned long flags;
@@ -3065,6 +1687,12 @@ void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
3065 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1687 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
3066} 1688}
3067 1689
1690/**
1691 * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP
                                                      1692 * @unit: The "boxed" unit.
                                                      1693 * @id: The debug trace id.
                                                      1694 * @ref: Reference for the debug trace.
1695 */
3068void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref) 1696void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
3069{ 1697{
3070 zfcp_erp_modify_unit_status(unit, id, ref, 1698 zfcp_erp_modify_unit_status(unit, id, ref,
@@ -3072,6 +1700,15 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
3072 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1700 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
3073} 1701}
3074 1702
1703/**
1704 * zfcp_erp_port_access_denied - Adapter denied access to port.
1705 * @port: port where access has been denied
1706 * @id: id for debug trace
1707 * @ref: reference for debug trace
1708 *
1709 * Since the adapter has denied access, stop using the port and the
1710 * attached units.
1711 */
3075void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref) 1712void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
3076{ 1713{
3077 unsigned long flags; 1714 unsigned long flags;
@@ -3083,6 +1720,14 @@ void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
3083 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 1720 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
3084} 1721}
3085 1722
1723/**
1724 * zfcp_erp_unit_access_denied - Adapter denied access to unit.
1725 * @unit: unit where access has been denied
1726 * @id: id for debug trace
1727 * @ref: reference for debug trace
1728 *
1729 * Since the adapter has denied access, stop using the unit.
1730 */
3086void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref) 1731void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref)
3087{ 1732{
3088 zfcp_erp_modify_unit_status(unit, id, ref, 1733 zfcp_erp_modify_unit_status(unit, id, ref,
@@ -3090,67 +1735,54 @@ void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref)
3090 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1735 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
3091} 1736}
3092 1737
3093void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id, 1738static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id,
3094 void *ref) 1739 void *ref)
3095{ 1740{
3096 struct zfcp_port *port; 1741 int status = atomic_read(&unit->status);
3097 unsigned long flags; 1742 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
3098 1743 ZFCP_STATUS_COMMON_ACCESS_BOXED)))
3099 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
3100 return; 1744 return;
3101 1745
3102 read_lock_irqsave(&zfcp_data.config_lock, flags); 1746 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
3103 if (adapter->nameserver_port)
3104 zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
3105 list_for_each_entry(port, &adapter->port_list_head, list)
3106 if (port != adapter->nameserver_port)
3107 zfcp_erp_port_access_changed(port, id, ref);
3108 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
3109} 1747}
3110 1748
3111void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id, void *ref) 1749static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
1750 void *ref)
3112{ 1751{
3113 struct zfcp_adapter *adapter = port->adapter;
3114 struct zfcp_unit *unit; 1752 struct zfcp_unit *unit;
1753 int status = atomic_read(&port->status);
3115 1754
3116 if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, 1755 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
3117 &port->status) && 1756 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
3118 !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, 1757 if (!(status & ZFCP_STATUS_PORT_WKA))
3119 &port->status)) {
3120 if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
3121 list_for_each_entry(unit, &port->unit_list_head, list) 1758 list_for_each_entry(unit, &port->unit_list_head, list)
3122 zfcp_erp_unit_access_changed(unit, id, ref); 1759 zfcp_erp_unit_access_changed(unit, id, ref);
3123 return; 1760 return;
3124 } 1761 }
3125 1762
3126 ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s " 1763 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
3127 "(due to ACT update)\n",
3128 port->wwpn, zfcp_get_busid_by_adapter(adapter));
3129 if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref))
3130 ZFCP_LOG_NORMAL("failed reopen of port"
3131 "(adapter %s, wwpn=0x%016Lx)\n",
3132 zfcp_get_busid_by_adapter(adapter), port->wwpn);
3133} 1764}
3134 1765
3135void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id, void *ref) 1766/**
1767 * zfcp_erp_adapter_access_changed - Process change in adapter ACT
1768 * @adapter: Adapter where the Access Control Table (ACT) changed
1769 * @id: Id for debug trace
1770 * @ref: Reference for debug trace
1771 */
1772void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
1773 void *ref)
3136{ 1774{
3137 struct zfcp_adapter *adapter = unit->port->adapter; 1775 struct zfcp_port *port;
1776 unsigned long flags;
3138 1777
3139 if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, 1778 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
3140 &unit->status) &&
3141 !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
3142 &unit->status))
3143 return; 1779 return;
3144 1780
3145 ZFCP_LOG_NORMAL("reopen of unit 0x%016Lx on port 0x%016Lx " 1781 read_lock_irqsave(&zfcp_data.config_lock, flags);
3146 " on adapter %s (due to ACT update)\n", 1782 if (adapter->nameserver_port)
3147 unit->fcp_lun, unit->port->wwpn, 1783 zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
3148 zfcp_get_busid_by_adapter(adapter)); 1784 list_for_each_entry(port, &adapter->port_list_head, list)
3149 if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref)) 1785 if (port != adapter->nameserver_port)
3150 ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, " 1786 zfcp_erp_port_access_changed(port, id, ref);
3151 "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n", 1787 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
3152 zfcp_get_busid_by_adapter(adapter),
3153 unit->port->wwpn, unit->fcp_lun);
3154} 1788}
3155
3156#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 6abf178fda5d..edfdb21591f3 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -1,22 +1,9 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * External function declarations.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
@@ -24,172 +11,50 @@
24 11
25#include "zfcp_def.h" 12#include "zfcp_def.h"
26 13
27extern struct zfcp_data zfcp_data; 14/* zfcp_aux.c */
28 15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *,
29/******************************** SYSFS *************************************/ 16 fcp_lun_t);
30extern struct attribute_group *zfcp_driver_attr_groups[]; 17extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
31extern int zfcp_sysfs_adapter_create_files(struct device *); 18 wwn_t);
32extern void zfcp_sysfs_adapter_remove_files(struct device *); 19extern int zfcp_adapter_enqueue(struct ccw_device *);
33extern int zfcp_sysfs_port_create_files(struct device *, u32); 20extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
34extern void zfcp_sysfs_port_remove_files(struct device *, u32); 21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32,
35extern int zfcp_sysfs_unit_create_files(struct device *); 22 u32);
36extern void zfcp_sysfs_unit_remove_files(struct device *); 23extern void zfcp_port_dequeue(struct zfcp_port *);
37extern void zfcp_sysfs_port_release(struct device *);
38extern void zfcp_sysfs_unit_release(struct device *);
39
40/**************************** CONFIGURATION *********************************/
41extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, fcp_lun_t);
42extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, wwn_t);
43extern struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *, u32);
44struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
45extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
46extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
47extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
48extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
49extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t,
50 u32, u32);
51extern void zfcp_port_dequeue(struct zfcp_port *);
52extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t); 24extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
53extern void zfcp_unit_dequeue(struct zfcp_unit *); 25extern void zfcp_unit_dequeue(struct zfcp_unit *);
54 26extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
55/******************************* S/390 IO ************************************/ 27extern void zfcp_sg_free_table(struct scatterlist *, int);
56extern int zfcp_ccw_register(void); 28extern int zfcp_sg_setup_table(struct scatterlist *, int);
57 29
58extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); 30/* zfcp_ccw.c */
59extern int zfcp_qdio_allocate(struct zfcp_adapter *); 31extern int zfcp_ccw_register(void);
60extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *); 32
61extern void zfcp_qdio_free_queues(struct zfcp_adapter *); 33/* zfcp_cfdc.c */
62extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, 34extern struct miscdevice zfcp_cfdc_misc;
63 struct zfcp_fsf_req *); 35
64 36/* zfcp_dbf.c */
65extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req 37extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
66 (struct zfcp_fsf_req *, int, int); 38extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
67extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr 39extern void zfcp_rec_dbf_event_thread(u8, struct zfcp_adapter *);
68 (struct zfcp_fsf_req *); 40extern void zfcp_rec_dbf_event_thread_lock(u8, struct zfcp_adapter *);
69extern int zfcp_qdio_sbals_from_sg 41extern void zfcp_rec_dbf_event_adapter(u8, void *, struct zfcp_adapter *);
70 (struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int); 42extern void zfcp_rec_dbf_event_port(u8, void *, struct zfcp_port *);
71extern int zfcp_qdio_sbals_from_scsicmnd 43extern void zfcp_rec_dbf_event_unit(u8, void *, struct zfcp_unit *);
72 (struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *); 44extern void zfcp_rec_dbf_event_trigger(u8, void *, u8, u8, void *,
73 45 struct zfcp_adapter *,
74
75/******************************** FSF ****************************************/
76extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
77extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
78extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
79
80extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
81extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
82
83extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
84extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
85 struct fsf_qtcb_bottom_config *);
86extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
87extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
88 struct fsf_qtcb_bottom_port *);
89extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
90 u32, u32, struct zfcp_sg_list *);
91extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long);
92extern void zfcp_erp_start_timer(struct zfcp_fsf_req *);
93extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
94extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
95extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
96 unsigned long *, struct zfcp_fsf_req **);
97extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
98 struct zfcp_erp_action *);
99extern int zfcp_fsf_send_els(struct zfcp_send_els *);
100extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
101 struct zfcp_unit *,
102 struct scsi_cmnd *, int, int);
103extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
104extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
105extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
106extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management(
107 struct zfcp_adapter *, struct zfcp_unit *, u8, int);
108extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(
109 unsigned long, struct zfcp_adapter *, struct zfcp_unit *, int);
110
111/******************************* FC/FCP **************************************/
112extern int zfcp_nameserver_enqueue(struct zfcp_adapter *);
113extern int zfcp_ns_gid_pn_request(struct zfcp_erp_action *);
114extern int zfcp_check_ct_response(struct ct_hdr *);
115extern int zfcp_handle_els_rjt(u32, struct zfcp_ls_rjt_par *);
116extern void zfcp_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
117
118/******************************* SCSI ****************************************/
119extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
120extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
121extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
122extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
123extern void set_host_byte(int *, char);
124extern void set_driver_byte(int *, char);
125extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
126extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
127
128extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
129 struct scsi_cmnd *, int);
130extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int);
131extern struct fc_function_template zfcp_transport_functions;
132
133/******************************** ERP ****************************************/
134extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
135 u32, int);
136extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
137extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
138extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
139
140extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
141 int);
142extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
143extern int zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
144extern int zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
145extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
146extern int zfcp_erp_port_reopen_all(struct zfcp_adapter *, int, u8, void *);
147
148extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
149 int);
150extern int zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
151extern int zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
152extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
153
154extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
155extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
156extern int zfcp_erp_wait(struct zfcp_adapter *);
157extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
158
159extern int zfcp_test_link(struct zfcp_port *);
160
161extern void zfcp_erp_port_boxed(struct zfcp_port *, u8 id, void *ref);
162extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8 id, void *ref);
163extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8 id, void *ref);
164extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8 id, void *ref);
165extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
166extern void zfcp_erp_port_access_changed(struct zfcp_port *, u8, void *);
167extern void zfcp_erp_unit_access_changed(struct zfcp_unit *, u8, void *);
168
169/******************************** AUX ****************************************/
170extern void zfcp_rec_dbf_event_thread(u8 id, struct zfcp_adapter *adapter,
171 int lock);
172extern void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *);
173extern void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port);
174extern void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit);
175extern void zfcp_rec_dbf_event_trigger(u8 id, void *ref, u8 want, u8 need,
176 void *action, struct zfcp_adapter *,
177 struct zfcp_port *, struct zfcp_unit *); 46 struct zfcp_port *, struct zfcp_unit *);
178extern void zfcp_rec_dbf_event_action(u8 id, struct zfcp_erp_action *); 47extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
179
180extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); 48extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
181extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, 49extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
182 struct fsf_status_read_buffer *); 50 struct fsf_status_read_buffer *);
183extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, 51extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
184 unsigned int, unsigned int, unsigned int, 52 int);
185 int, int);
186
187extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); 53extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
188extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); 54extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
189extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); 55extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
190extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); 56extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
191extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); 57extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
192
193extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, 58extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
194 struct scsi_cmnd *, 59 struct scsi_cmnd *,
195 struct zfcp_fsf_req *); 60 struct zfcp_fsf_req *);
@@ -198,6 +63,101 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
198 unsigned long); 63 unsigned long);
199extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 64extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
200 struct scsi_cmnd *); 65 struct scsi_cmnd *);
201extern int zfcp_reqlist_isempty(struct zfcp_adapter *); 66
67/* zfcp_erp.c */
68extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
69 u32, int);
70extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
71extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
72extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
73extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
74 int);
75extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
76extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
77extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
78extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
79extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
80 int);
81extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
82extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
83extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
84extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
85extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
86extern void zfcp_erp_wait(struct zfcp_adapter *);
87extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
88extern void zfcp_erp_port_boxed(struct zfcp_port *, u8, void *);
89extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8, void *);
90extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
91extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
92extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
93extern void zfcp_erp_timeout_handler(unsigned long);
94
95/* zfcp_fc.c */
96extern int zfcp_scan_ports(struct zfcp_adapter *);
97extern void _zfcp_scan_ports_later(struct work_struct *);
98extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
99extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *);
100extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
101extern void zfcp_test_link(struct zfcp_port *);
102
103/* zfcp_fsf.c */
104extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
105extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
106extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
107extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
108extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
109extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
110extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
111 struct fsf_qtcb_bottom_config *);
112extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
113extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
114 struct fsf_qtcb_bottom_port *);
115extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
116 struct zfcp_fsf_cfdc *);
117extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
118extern int zfcp_fsf_status_read(struct zfcp_adapter *);
119extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
120extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
121 struct zfcp_erp_action *);
122extern int zfcp_fsf_send_els(struct zfcp_send_els *);
123extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
124 struct zfcp_unit *,
125 struct scsi_cmnd *, int, int);
126extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
127extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
128extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *,
129 struct zfcp_unit *, u8, int);
130extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
131 struct zfcp_adapter *,
132 struct zfcp_unit *, int);
133
134/* zfcp_qdio.c */
135extern int zfcp_qdio_allocate(struct zfcp_adapter *);
136extern void zfcp_qdio_free(struct zfcp_adapter *);
137extern int zfcp_qdio_send(struct zfcp_fsf_req *);
138extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req(
139 struct zfcp_fsf_req *);
140extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
141 struct zfcp_fsf_req *);
142extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
143 struct scatterlist *, int);
144extern int zfcp_qdio_open(struct zfcp_adapter *);
145extern void zfcp_qdio_close(struct zfcp_adapter *);
146
147/* zfcp_scsi.c */
148extern struct zfcp_data zfcp_data;
149extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
150extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
151extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
152extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
153extern struct fc_function_template zfcp_transport_functions;
154
155/* zfcp_sysfs.c */
156extern struct attribute_group zfcp_sysfs_unit_attrs;
157extern struct attribute_group zfcp_sysfs_adapter_attrs;
158extern struct attribute_group zfcp_sysfs_ns_port_attrs;
159extern struct attribute_group zfcp_sysfs_port_attrs;
160extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
161extern struct device_attribute *zfcp_sysfs_shost_attrs[];
202 162
203#endif /* ZFCP_EXT_H */ 163#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
new file mode 100644
index 000000000000..e984469bb98b
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -0,0 +1,567 @@
1/*
2 * zfcp device driver
3 *
4 * Fibre Channel related functions for the zfcp device driver.
5 *
6 * Copyright IBM Corporation 2008
7 */
8
9#include "zfcp_ext.h"
10
11struct ct_iu_gpn_ft_req {
12 struct ct_hdr header;
13 u8 flags;
14 u8 domain_id_scope;
15 u8 area_id_scope;
16 u8 fc4_type;
17} __attribute__ ((packed));
18
19struct gpn_ft_resp_acc {
20 u8 control;
21 u8 port_id[3];
22 u8 reserved[4];
23 u64 wwpn;
24} __attribute__ ((packed));
25
26#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \
27 / sizeof(struct gpn_ft_resp_acc))
28#define ZFCP_GPN_FT_BUFFERS 4
29#define ZFCP_GPN_FT_MAX_ENTRIES (ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1))
30
31struct ct_iu_gpn_ft_resp {
32 struct ct_hdr header;
33 struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
34} __attribute__ ((packed));
35
36struct zfcp_gpn_ft {
37 struct zfcp_send_ct ct;
38 struct scatterlist sg_req;
39 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
40};
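A stand-alone sketch of the sizing arithmetic behind ZFCP_GPN_FT_ENTRIES and ZFCP_GPN_FT_MAX_ENTRIES, assuming 4 KiB pages and the 16-byte ct_hdr and gpn_ft_resp_acc layouts implied by the structures above; all names below are hypothetical and not part of the driver:

#include <assert.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u	/* assumption: 4 KiB pages */
#define SKETCH_CT_HDR	   16u	/* assumed sizeof(struct ct_hdr) */
#define SKETCH_ACC	   16u	/* sizeof(struct gpn_ft_resp_acc): 1+3+4+8 */
#define SKETCH_BUFFERS	    4u	/* ZFCP_GPN_FT_BUFFERS */

int main(void)
{
	unsigned entries = (SKETCH_PAGE_SIZE - SKETCH_CT_HDR) / SKETCH_ACC;
	unsigned max_entries = SKETCH_BUFFERS * (entries + 1);

	assert(entries == 255);		/* accept entries per response page */
	assert(max_entries == 1024);	/* one header-sized slot per page included */
	printf("entries/page=%u max=%u\n", entries, max_entries);
	return 0;
}

Only the first response page actually carries the CT header, which is why the GPN_FT request below sizes its residual with ZFCP_GPN_FT_MAX_ENTRIES - 1.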
41
42static struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *adapter,
43 u32 d_id)
44{
45 struct zfcp_port *port;
46
47 list_for_each_entry(port, &adapter->port_list_head, list)
48 if ((port->d_id == d_id) &&
49 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status))
50 return port;
51 return NULL;
52}
53
54static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
55 struct fcp_rscn_element *elem)
56{
57 unsigned long flags;
58 struct zfcp_port *port;
59
60 read_lock_irqsave(&zfcp_data.config_lock, flags);
61 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
62 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
63 continue;
64 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
65 if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status))
66 /* Try to connect to unused ports anyway. */
67 zfcp_erp_port_reopen(port,
68 ZFCP_STATUS_COMMON_ERP_FAILED,
69 82, fsf_req);
70 else if ((port->d_id & range) == (elem->nport_did & range))
71 /* Check connection status for connected ports */
72 zfcp_test_link(port);
73 }
74 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
75}
76
77static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
78{
79 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
80 struct fcp_rscn_head *fcp_rscn_head;
81 struct fcp_rscn_element *fcp_rscn_element;
82 u16 i;
83 u16 no_entries;
84 u32 range_mask;
85
86 fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data;
87 fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head;
88
89 /* see FC-FS */
90 no_entries = fcp_rscn_head->payload_len /
91 sizeof(struct fcp_rscn_element);
92
93 for (i = 1; i < no_entries; i++) {
94 /* skip head and start with 1st element */
95 fcp_rscn_element++;
96 switch (fcp_rscn_element->addr_format) {
97 case ZFCP_PORT_ADDRESS:
98 range_mask = ZFCP_PORTS_RANGE_PORT;
99 break;
100 case ZFCP_AREA_ADDRESS:
101 range_mask = ZFCP_PORTS_RANGE_AREA;
102 break;
103 case ZFCP_DOMAIN_ADDRESS:
104 range_mask = ZFCP_PORTS_RANGE_DOMAIN;
105 break;
106 case ZFCP_FABRIC_ADDRESS:
107 range_mask = ZFCP_PORTS_RANGE_FABRIC;
108 break;
109 default:
110 continue;
111 }
112 _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
113 }
114 schedule_work(&fsf_req->adapter->scan_work);
115}
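For illustration, a minimal sketch of how the range mask narrows an RSCN to the affected ports. The mask values are assumptions based on the one-byte domain/area/port split of a 24-bit FC D_ID; the driver's real values sit behind the ZFCP_PORTS_RANGE_* constants used above:

#include <stdio.h>
#include <stdint.h>

#define SKETCH_RANGE_PORT   0xFFFFFFu	/* exact port address */
#define SKETCH_RANGE_AREA   0xFFFF00u	/* all ports in one area */
#define SKETCH_RANGE_DOMAIN 0xFF0000u	/* all ports in one domain */

/* a port is affected when its D_ID matches the RSCN D_ID under the mask */
static int rscn_hits(uint32_t port_did, uint32_t rscn_did, uint32_t range)
{
	return (port_did & range) == (rscn_did & range);
}

int main(void)
{
	/* 0x012345: domain 0x01, area 0x23, port 0x45 */
	printf("%d\n", rscn_hits(0x012345, 0x012399, SKETCH_RANGE_AREA));   /* 1 */
	printf("%d\n", rscn_hits(0x012345, 0x019999, SKETCH_RANGE_DOMAIN)); /* 1 */
	printf("%d\n", rscn_hits(0x012345, 0x012346, SKETCH_RANGE_PORT));   /* 0 */
	return 0;
}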
116
117static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn)
118{
119 struct zfcp_adapter *adapter = req->adapter;
120 struct zfcp_port *port;
121 unsigned long flags;
122
123 read_lock_irqsave(&zfcp_data.config_lock, flags);
124 list_for_each_entry(port, &adapter->port_list_head, list)
125 if (port->wwpn == wwpn)
126 break;
127 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
128
129 if (port && (port->wwpn == wwpn))
130 zfcp_erp_port_forced_reopen(port, 0, 83, req);
131}
132
133static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
134{
135 struct fsf_status_read_buffer *status_buffer =
136 (struct fsf_status_read_buffer *)req->data;
137 struct fsf_plogi *els_plogi =
138 (struct fsf_plogi *) status_buffer->payload.data;
139
140 zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn);
141}
142
143static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
144{
145 struct fsf_status_read_buffer *status_buffer =
146 (struct fsf_status_read_buffer *)req->data;
147 struct fcp_logo *els_logo =
148 (struct fcp_logo *) status_buffer->payload.data;
149
150 zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn);
151}
152
153/**
154 * zfcp_fc_incoming_els - handle incoming ELS
155 * @fsf_req: request which contains incoming ELS
156 */
157void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
158{
159 struct fsf_status_read_buffer *status_buffer =
160 (struct fsf_status_read_buffer *) fsf_req->data;
161 unsigned int els_type = status_buffer->payload.data[0];
162
163 zfcp_san_dbf_event_incoming_els(fsf_req);
164 if (els_type == LS_PLOGI)
165 zfcp_fc_incoming_plogi(fsf_req);
166 else if (els_type == LS_LOGO)
167 zfcp_fc_incoming_logo(fsf_req);
168 else if (els_type == LS_RSCN)
169 zfcp_fc_incoming_rscn(fsf_req);
170}
171
172static void zfcp_ns_gid_pn_handler(unsigned long data)
173{
174 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
175 struct zfcp_send_ct *ct = &gid_pn->ct;
176 struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
177 struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
178 struct zfcp_port *port = gid_pn->port;
179
180 if (ct->status)
181 goto out;
182 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
183 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
184 goto out;
185 }
186 /* paranoia */
187 if (ct_iu_req->wwpn != port->wwpn)
188 goto out;
189 /* looks like a valid d_id */
190 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
191 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
192out:
193 mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
194}
195
196/**
197 * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
198 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
199 * return: -ENOMEM on error, 0 otherwise
200 */
201int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
202{
203 int ret;
204 struct zfcp_gid_pn_data *gid_pn;
205 struct zfcp_adapter *adapter = erp_action->adapter;
206
207 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
208 if (!gid_pn)
209 return -ENOMEM;
210
211 memset(gid_pn, 0, sizeof(*gid_pn));
212
213 /* setup parameters for send generic command */
214 gid_pn->port = erp_action->port;
215 gid_pn->ct.port = adapter->nameserver_port;
216 gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
217 gid_pn->ct.handler_data = (unsigned long) gid_pn;
218 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
219 gid_pn->ct.req = &gid_pn->req;
220 gid_pn->ct.resp = &gid_pn->resp;
221 gid_pn->ct.req_count = 1;
222 gid_pn->ct.resp_count = 1;
223 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
224 sizeof(struct ct_iu_gid_pn_req));
225 sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
226 sizeof(struct ct_iu_gid_pn_resp));
227
228 /* setup nameserver request */
229 gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION;
230 gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
231 gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
232 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
233 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
234 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
235 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
236
237 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
238 erp_action);
239 if (ret)
240 mempool_free(gid_pn, adapter->pool.data_gid_pn);
241 return ret;
242}
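A stand-alone sketch of the CT IU preamble populated above. The well-known values (directory service 0xfc, name server subtype 0x02, GID_PN command 0x0112, revision 0x01) follow FC-GS, but the struct below is hypothetical and only mirrors the driver's ct_hdr layout by assumption:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct ct_hdr_sketch {
	uint8_t	 revision;	/* 0x01 per FC-GS */
	uint8_t	 in_id[3];
	uint8_t	 gs_type;	/* 0xfc: directory service */
	uint8_t	 gs_subtype;	/* 0x02: name server */
	uint8_t	 options;	/* 0x00: synchronous request */
	uint8_t	 reserved;
	uint16_t cmd_rsp_code;	/* 0x0112: GID_PN */
	uint16_t max_res_size;	/* residual size, in 4-byte words */
	uint8_t	 tail[4];	/* reason code etc. in responses */
} __attribute__((packed));

int main(void)
{
	struct ct_hdr_sketch hdr;

	assert(sizeof(hdr) == 16);	/* assumed preamble size */
	memset(&hdr, 0, sizeof(hdr));
	hdr.revision = 0x01;
	hdr.gs_type = 0xfc;
	hdr.gs_subtype = 0x02;
	hdr.cmd_rsp_code = 0x0112;
	return 0;
}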
243
244/**
245 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
246 * @port: zfcp_port structure
247 * @plogi: plogi payload
248 *
249 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
250 */
251void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
252{
253 port->maxframe_size = plogi->serv_param.common_serv_param[7] |
254 ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
255 if (plogi->serv_param.class1_serv_param[0] & 0x80)
256 port->supported_classes |= FC_COS_CLASS1;
257 if (plogi->serv_param.class2_serv_param[0] & 0x80)
258 port->supported_classes |= FC_COS_CLASS2;
259 if (plogi->serv_param.class3_serv_param[0] & 0x80)
260 port->supported_classes |= FC_COS_CLASS3;
261 if (plogi->serv_param.class4_serv_param[0] & 0x80)
262 port->supported_classes |= FC_COS_CLASS4;
263}
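The maxframe_size computation above decodes the 12-bit receive data field size from the FC-PH common service parameters: the low nibble of byte 6 supplies the high bits, byte 7 the low bits. A small stand-alone sketch of that bit math (the helper name is hypothetical):

#include <assert.h>
#include <stdint.h>

static uint16_t plogi_maxframe(const uint8_t csp[8])
{
	/* 12-bit field: low nibble of byte 6 is bits 11..8, byte 7 is 7..0 */
	return csp[7] | ((csp[6] & 0x0F) << 8);
}

int main(void)
{
	uint8_t csp[8] = { 0 };

	csp[6] = 0x88;	/* high nibble carries unrelated flags */
	csp[7] = 0x00;
	assert(plogi_maxframe(csp) == 0x800);	/* 2048-byte frames */
	return 0;
}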
264
265struct zfcp_els_adisc {
266 struct zfcp_send_els els;
267 struct scatterlist req;
268 struct scatterlist resp;
269 struct zfcp_ls_adisc ls_adisc;
270 struct zfcp_ls_adisc_acc ls_adisc_acc;
271};
272
273static void zfcp_fc_adisc_handler(unsigned long data)
274{
275 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
276 struct zfcp_port *port = adisc->els.port;
277 struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc;
278
279 if (adisc->els.status) {
280 /* request rejected or timed out */
281 zfcp_erp_port_forced_reopen(port, 0, 63, NULL);
282 goto out;
283 }
284
285 if (!port->wwnn)
286 port->wwnn = ls_adisc->wwnn;
287
288 if (port->wwpn != ls_adisc->wwpn)
289 zfcp_erp_port_reopen(port, 0, 64, NULL);
290
291 out:
292 zfcp_port_put(port);
293 kfree(adisc);
294}
295
296static int zfcp_fc_adisc(struct zfcp_port *port)
297{
298 struct zfcp_els_adisc *adisc;
299 struct zfcp_adapter *adapter = port->adapter;
300
301 adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC);
302 if (!adisc)
303 return -ENOMEM;
304
305 adisc->els.req = &adisc->req;
306 adisc->els.resp = &adisc->resp;
307 sg_init_one(adisc->els.req, &adisc->ls_adisc,
308 sizeof(struct zfcp_ls_adisc));
309 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
310 sizeof(struct zfcp_ls_adisc_acc));
311
312 adisc->els.req_count = 1;
313 adisc->els.resp_count = 1;
314 adisc->els.adapter = adapter;
315 adisc->els.port = port;
316 adisc->els.d_id = port->d_id;
317 adisc->els.handler = zfcp_fc_adisc_handler;
318 adisc->els.handler_data = (unsigned long) adisc;
319 adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
320
321 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
322 without FC-AL-2 capability, so we don't set it */
323 adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host);
324 adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host);
325 adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host);
326
327 return zfcp_fsf_send_els(&adisc->els);
328}
329
330/**
331 * zfcp_test_link - lightweight link test procedure
332 * @port: port to be tested
333 *
334 * Test status of a link to a remote port using the ELS command ADISC.
335 * If there is a problem with the remote port, error recovery steps
336 * will be triggered.
337 */
338void zfcp_test_link(struct zfcp_port *port)
339{
340 int retval;
341
342 zfcp_port_get(port);
343 retval = zfcp_fc_adisc(port);
344 if (retval == 0 || retval == -EBUSY)
345 return;
346
347 	/* sending ADISC was not possible */
348 zfcp_port_put(port);
349 zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
350}
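The get/put pairing above hands one port reference to the asynchronous ADISC path: normally the completion handler (zfcp_fc_adisc_handler) drops it, but when submission fails the caller must drop it itself. A tiny stand-alone sketch of that ownership rule, with all names hypothetical:

#include <assert.h>
#include <stdio.h>

struct obj { int refs; };

static void get_ref(struct obj *o) { o->refs++; }
static void put_ref(struct obj *o) { o->refs--; }

/* pretend submission fails, so no completion callback will ever run */
static int submit_async(struct obj *o) { (void)o; return -1; }

int main(void)
{
	struct obj port = { .refs = 1 };

	get_ref(&port);			/* reference for the in-flight request */
	if (submit_async(&port))
		put_ref(&port);		/* no callback will run: drop it here */
	assert(port.refs == 1);		/* no reference leaked either way */
	printf("refs: %d\n", port.refs);
	return 0;
}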
351
352static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
353{
354 int ret;
355
356 if (!adapter->nameserver_port)
357 return -EINTR;
358
359 if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
360 &adapter->nameserver_port->status)) {
361 ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
362 NULL);
363 if (ret)
364 return ret;
365 zfcp_erp_wait(adapter);
366 zfcp_port_put(adapter->nameserver_port);
367 }
368 return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
369 &adapter->nameserver_port->status);
370}
371
372static void zfcp_gpn_ft_handler(unsigned long _done)
373{
374 complete((struct completion *)_done);
375}
376
377static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
378{
379 struct scatterlist *sg = &gpn_ft->sg_req;
380
381 kfree(sg_virt(sg)); /* free request buffer */
382 zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS);
383
384 kfree(gpn_ft);
385}
386
387static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
388{
389 struct zfcp_gpn_ft *gpn_ft;
390 struct ct_iu_gpn_ft_req *req;
391
392 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
393 if (!gpn_ft)
394 return NULL;
395
396 req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL);
397 if (!req) {
398 kfree(gpn_ft);
399 gpn_ft = NULL;
400 goto out;
401 }
402 sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
403
404 if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) {
405 zfcp_free_sg_env(gpn_ft);
406 gpn_ft = NULL;
407 }
408out:
409 return gpn_ft;
410}
411
412
413static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
414 struct zfcp_adapter *adapter)
415{
416 struct zfcp_send_ct *ct = &gpn_ft->ct;
417 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
418 struct completion done;
419 int ret;
420
421 /* prepare CT IU for GPN_FT */
422 req->header.revision = ZFCP_CT_REVISION;
423 req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
424 req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
425 req->header.options = ZFCP_CT_SYNCHRONOUS;
426 req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
427 req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) *
428 (ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2;
429 req->flags = 0;
430 req->domain_id_scope = 0;
431 req->area_id_scope = 0;
432 req->fc4_type = ZFCP_CT_SCSI_FCP;
433
434 /* prepare zfcp_send_ct */
435 ct->port = adapter->nameserver_port;
436 ct->handler = zfcp_gpn_ft_handler;
437 ct->handler_data = (unsigned long)&done;
438 ct->timeout = 10;
439 ct->req = &gpn_ft->sg_req;
440 ct->resp = gpn_ft->sg_resp;
441 ct->req_count = 1;
442 ct->resp_count = ZFCP_GPN_FT_BUFFERS;
443
444 init_completion(&done);
445 ret = zfcp_fsf_send_ct(ct, NULL, NULL);
446 if (!ret)
447 wait_for_completion(&done);
448 return ret;
449}
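The max_res_size set above is expressed in 4-byte words, hence the >> 2; it covers every accept entry except the slot consumed by the response CT header. A stand-alone check of that arithmetic, assuming the 16-byte accept entry and the 1024-entry maximum derived earlier:

#include <assert.h>

int main(void)
{
	unsigned acc_size = 16;		/* sizeof(struct gpn_ft_resp_acc) */
	unsigned max_entries = 1024;	/* ZFCP_GPN_FT_MAX_ENTRIES, see above */
	unsigned words = (acc_size * (max_entries - 1)) >> 2;	/* bytes -> words */

	assert(words == 4092);
	return 0;
}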
450
451static void zfcp_validate_port(struct zfcp_port *port)
452{
453 struct zfcp_adapter *adapter = port->adapter;
454
455 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
456
457 if (port == adapter->nameserver_port)
458 return;
459 if ((port->supported_classes != 0) || (port->units != 0)) {
460 zfcp_port_put(port);
461 return;
462 }
463 zfcp_erp_port_shutdown(port, 0, 151, NULL);
464 zfcp_erp_wait(adapter);
465 zfcp_port_put(port);
466 zfcp_port_dequeue(port);
467}
468
469static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
470{
471 struct zfcp_send_ct *ct = &gpn_ft->ct;
472 struct scatterlist *sg = gpn_ft->sg_resp;
473 struct ct_hdr *hdr = sg_virt(sg);
474 struct gpn_ft_resp_acc *acc = sg_virt(sg);
475 struct zfcp_adapter *adapter = ct->port->adapter;
476 struct zfcp_port *port, *tmp;
477 u32 d_id;
478 int ret = 0, x;
479
480 if (ct->status)
481 return -EIO;
482
483 if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) {
484 if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD)
485 return -EAGAIN; /* might be a temporary condition */
486 return -EIO;
487 }
488
489 if (hdr->max_res_size)
490 return -E2BIG;
491
492 down(&zfcp_data.config_sema);
493
494 /* first entry is the header */
495 for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES; x++) {
496 if (x % (ZFCP_GPN_FT_ENTRIES + 1))
497 acc++;
498 else
499 acc = sg_virt(++sg);
500
501 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
502 acc->port_id[2];
503
504 /* skip the adapter's port and known remote ports */
505 if (acc->wwpn == fc_host_port_name(adapter->scsi_host) ||
506 zfcp_get_port_by_did(adapter, d_id))
507 continue;
508
509 port = zfcp_port_enqueue(adapter, acc->wwpn,
510 ZFCP_STATUS_PORT_DID_DID |
511 ZFCP_STATUS_COMMON_NOESC, d_id);
512 if (IS_ERR(port))
513 ret = PTR_ERR(port);
514 else
515 zfcp_erp_port_reopen(port, 0, 149, NULL);
516 if (acc->control & 0x80) /* last entry */
517 break;
518 }
519
520 zfcp_erp_wait(adapter);
521 list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
522 zfcp_validate_port(port);
523 up(&zfcp_data.config_sema);
524 return ret;
525}
526
527/**
528 * zfcp_scan_ports - scan remote ports and attach new ports
529 * @adapter: pointer to struct zfcp_adapter
530 */
531int zfcp_scan_ports(struct zfcp_adapter *adapter)
532{
533 int ret, i;
534 struct zfcp_gpn_ft *gpn_ft;
535
536 zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */
537 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
538 return 0;
539
540 ret = zfcp_scan_get_nameserver(adapter);
541 if (ret)
542 return ret;
543
544 gpn_ft = zfcp_alloc_sg_env();
545 if (!gpn_ft)
546 return -ENOMEM;
547
548 for (i = 0; i < 3; i++) {
549 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
550 if (!ret) {
551 ret = zfcp_scan_eval_gpn_ft(gpn_ft);
552 if (ret == -EAGAIN)
553 ssleep(1);
554 else
555 break;
556 }
557 }
558 zfcp_free_sg_env(gpn_ft);
559
560 return ret;
561}
562
563
564void _zfcp_scan_ports_later(struct work_struct *work)
565{
566 zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
567}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b2ea4ea051f5..19c1ca913874 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1,54 +1,37 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Implementation of FSF commands.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#include "zfcp_ext.h" 9#include "zfcp_ext.h"
23 10
24static int zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *); 11static void zfcp_fsf_request_timeout_handler(unsigned long data)
25static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *); 12{
26static int zfcp_fsf_open_port_handler(struct zfcp_fsf_req *); 13 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
27static int zfcp_fsf_close_port_handler(struct zfcp_fsf_req *); 14 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62,
28static int zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *); 15 NULL);
29static int zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *); 16}
30static int zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *); 17
31static int zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *); 18static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
32static int zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *); 19 unsigned long timeout)
33static int zfcp_fsf_send_fcp_command_task_management_handler( 20{
34 struct zfcp_fsf_req *); 21 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
35static int zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *); 22 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
36static int zfcp_fsf_status_read_handler(struct zfcp_fsf_req *); 23 fsf_req->timer.expires = jiffies + timeout;
37static int zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *); 24 add_timer(&fsf_req->timer);
38static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *); 25}
39static int zfcp_fsf_control_file_handler(struct zfcp_fsf_req *); 26
40static inline int zfcp_fsf_req_sbal_check( 27static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
41 unsigned long *, struct zfcp_qdio_queue *, int); 28{
42static inline int zfcp_use_one_sbal( 29 BUG_ON(!fsf_req->erp_action);
43 struct scatterlist *, int, struct scatterlist *, int); 30 fsf_req->timer.function = zfcp_erp_timeout_handler;
44static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int); 31 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
45static int zfcp_fsf_req_send(struct zfcp_fsf_req *); 32 fsf_req->timer.expires = jiffies + 30 * HZ;
46static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); 33 add_timer(&fsf_req->timer);
47static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); 34}
48static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
49static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *, u8,
50 struct fsf_link_down_info *);
51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
52 35
53/* association between FSF command and FSF QTCB type */ 36/* association between FSF command and FSF QTCB type */
54static u32 fsf_qtcb_type[] = { 37static u32 fsf_qtcb_type[] = {
@@ -67,96 +50,77 @@ static u32 fsf_qtcb_type[] = {
67 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
68}; 51};
69 52
70static const char zfcp_act_subtable_type[5][8] = { 53static const char *zfcp_act_subtable_type[] = {
71 "unknown", "OS", "WWPN", "DID", "LUN" 54 "unknown", "OS", "WWPN", "DID", "LUN"
72}; 55};
73 56
74/****************************************************************/ 57static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
75/*************** FSF related Functions *************************/
76/****************************************************************/
77
78#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
79
80/*
81 * function: zfcp_fsf_req_alloc
82 *
83 * purpose: Obtains an fsf_req and potentially a qtcb (for all but
84 * unsolicited requests) via helper functions
85 * Does some initial fsf request set-up.
86 *
87 * returns: pointer to allocated fsf_req if successfull
88 * NULL otherwise
89 *
90 * locks: none
91 *
92 */
93static struct zfcp_fsf_req *
94zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
95{ 58{
96 size_t size; 59 u16 subtable = table >> 16;
97 void *ptr; 60 u16 rule = table & 0xffff;
98 struct zfcp_fsf_req *fsf_req = NULL;
99 61
100 if (req_flags & ZFCP_REQ_NO_QTCB) 62 if (subtable && subtable < ARRAY_SIZE(zfcp_act_subtable_type))
101 size = sizeof(struct zfcp_fsf_req); 63 dev_warn(&adapter->ccw_device->dev,
102 else 64 "Access denied in subtable %s, rule %d.\n",
103 size = sizeof(struct zfcp_fsf_req_qtcb); 65 zfcp_act_subtable_type[subtable], rule);
104 66}
105 if (likely(pool))
106 ptr = mempool_alloc(pool, GFP_ATOMIC);
107 else {
108 if (req_flags & ZFCP_REQ_NO_QTCB)
109 ptr = kmalloc(size, GFP_ATOMIC);
110 else
111 ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
112 GFP_ATOMIC);
113 }
114
115 if (unlikely(!ptr))
116 goto out;
117
118 memset(ptr, 0, size);
119 67
120 if (req_flags & ZFCP_REQ_NO_QTCB) { 68static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
121 fsf_req = (struct zfcp_fsf_req *) ptr; 69 struct zfcp_port *port)
122 } else { 70{
123 fsf_req = &((struct zfcp_fsf_req_qtcb *) ptr)->fsf_req; 71 struct fsf_qtcb_header *header = &req->qtcb->header;
124 fsf_req->qtcb = &((struct zfcp_fsf_req_qtcb *) ptr)->qtcb; 72 dev_warn(&req->adapter->ccw_device->dev,
125 } 73 "Access denied, cannot send command to port 0x%016Lx.\n",
74 port->wwpn);
75 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
76 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
77 zfcp_erp_port_access_denied(port, 55, req);
78 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
79}
126 80
127 fsf_req->pool = pool; 81static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
82 struct zfcp_unit *unit)
83{
84 struct fsf_qtcb_header *header = &req->qtcb->header;
85 dev_warn(&req->adapter->ccw_device->dev,
86 "Access denied for unit 0x%016Lx on port 0x%016Lx.\n",
87 unit->fcp_lun, unit->port->wwpn);
88 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
89 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
90 zfcp_erp_unit_access_denied(unit, 59, req);
91 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
92}
128 93
129 out: 94static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
130 return fsf_req; 95{
96 dev_err(&req->adapter->ccw_device->dev,
97 "Required FC class not supported by adapter, "
98 "shutting down adapter.\n");
99 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
100 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
131} 101}
132 102
133/* 103/**
134 * function: zfcp_fsf_req_free 104 * zfcp_fsf_req_free - free memory used by fsf request
135 * 105 * @fsf_req: pointer to struct zfcp_fsf_req
136 * purpose: Frees the memory of an fsf_req (and potentially a qtcb) or
137 * returns it into the pool via helper functions.
138 *
139 * returns: sod all
140 *
141 * locks: none
142 */ 106 */
143void 107void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
144zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
145{ 108{
146 if (likely(fsf_req->pool)) { 109 if (likely(req->pool)) {
147 mempool_free(fsf_req, fsf_req->pool); 110 mempool_free(req, req->pool);
148 return; 111 return;
149 } 112 }
150 113
151 if (fsf_req->qtcb) { 114 if (req->qtcb) {
152 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req); 115 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req);
153 return; 116 return;
154 } 117 }
155
156 kfree(fsf_req);
157} 118}
158 119
159/* 120/**
121 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
122 * @adapter: pointer to struct zfcp_adapter
123 *
160 * Never ever call this without shutting down the adapter first. 124 * Never ever call this without shutting down the adapter first.
161 * Otherwise the adapter would continue using and corrupting s390 storage. 125 * Otherwise the adapter would continue using and corrupting s390 storage.
162 * Included BUG_ON() call to ensure this is done. 126 * Included BUG_ON() call to ensure this is done.
@@ -164,2353 +128,1359 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
164 */ 128 */
165void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 129void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
166{ 130{
167 struct zfcp_fsf_req *fsf_req, *tmp; 131 struct zfcp_fsf_req *req, *tmp;
168 unsigned long flags; 132 unsigned long flags;
169 LIST_HEAD(remove_queue); 133 LIST_HEAD(remove_queue);
170 unsigned int i; 134 unsigned int i;
171 135
172 BUG_ON(atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)); 136 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
173 spin_lock_irqsave(&adapter->req_list_lock, flags); 137 spin_lock_irqsave(&adapter->req_list_lock, flags);
174 atomic_set(&adapter->reqs_active, 0);
175 for (i = 0; i < REQUEST_LIST_SIZE; i++) 138 for (i = 0; i < REQUEST_LIST_SIZE; i++)
176 list_splice_init(&adapter->req_list[i], &remove_queue); 139 list_splice_init(&adapter->req_list[i], &remove_queue);
177 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 140 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
178 141
179 list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { 142 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
180 list_del(&fsf_req->list); 143 list_del(&req->list);
181 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 144 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
182 zfcp_fsf_req_complete(fsf_req); 145 zfcp_fsf_req_complete(req);
183 } 146 }
184} 147}
185 148
186/* 149static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
187 * function: zfcp_fsf_req_complete
188 *
189 * purpose: Updates active counts and timers for openfcp-reqs
190 * May cleanup request after req_eval returns
191 *
192 * returns: 0 - success
193 * !0 - failure
194 *
195 * context:
196 */
197int
198zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
199{ 150{
200 int retval = 0; 151 struct fsf_status_read_buffer *sr_buf = req->data;
201 int cleanup; 152 struct zfcp_adapter *adapter = req->adapter;
202 153 struct zfcp_port *port;
203 if (unlikely(fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { 154 int d_id = sr_buf->d_id & ZFCP_DID_MASK;
204 ZFCP_LOG_DEBUG("Status read response received\n"); 155 unsigned long flags;
205 /*
206 * Note: all cleanup handling is done in the callchain of
207 * the function call-chain below.
208 */
209 zfcp_fsf_status_read_handler(fsf_req);
210 goto out;
211 } else {
212 del_timer(&fsf_req->timer);
213 zfcp_fsf_protstatus_eval(fsf_req);
214 }
215
216 /*
217 * fsf_req may be deleted due to waking up functions, so
218 * cleanup is saved here and used later
219 */
220 if (likely(fsf_req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
221 cleanup = 1;
222 else
223 cleanup = 0;
224
225 fsf_req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
226 156
227 /* cleanup request if requested by initiator */ 157 read_lock_irqsave(&zfcp_data.config_lock, flags);
228 if (likely(cleanup)) { 158 list_for_each_entry(port, &adapter->port_list_head, list)
229 ZFCP_LOG_TRACE("removing FSF request %p\n", fsf_req); 159 if (port->d_id == d_id) {
230 /* 160 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
231 * lock must not be held here since it will be 161 switch (sr_buf->status_subtype) {
232 * grabed by the called routine, too 162 case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
233 */ 163 zfcp_erp_port_reopen(port, 0, 101, req);
234 zfcp_fsf_req_free(fsf_req); 164 break;
235 } else { 165 case FSF_STATUS_READ_SUB_ERROR_PORT:
236 /* notify initiator waiting for the requests completion */ 166 zfcp_erp_port_shutdown(port, 0, 122, req);
237 ZFCP_LOG_TRACE("waking initiator of FSF request %p\n",fsf_req); 167 break;
238 /* 168 }
239 * FIXME: Race! We must not access fsf_req here as it might have been 169 return;
240 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED 170 }
241 * flag. It's an improbable case. But, we have the same paranoia for 171 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
242 * the cleanup flag already. 172}
243 * Might better be handled using complete()?
244 * (setting the flag and doing wakeup ought to be atomic
245 * with regard to checking the flag as long as waitqueue is
246 * part of the to be released structure)
247 */
248 wake_up(&fsf_req->completion_wq);
249 }
250 173
251 out: 174static void zfcp_fsf_bit_error_threshold(struct zfcp_fsf_req *req)
252 return retval; 175{
176 struct zfcp_adapter *adapter = req->adapter;
177 struct fsf_status_read_buffer *sr_buf = req->data;
178 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
179
180 dev_warn(&adapter->ccw_device->dev,
181 "Warning: bit error threshold data "
182 "received for the adapter: "
183 "link failures = %i, loss of sync errors = %i, "
184 "loss of signal errors = %i, "
185 "primitive sequence errors = %i, "
186 "invalid transmission word errors = %i, "
187 		 "CRC errors = %i.\n",
188 err->link_failure_error_count,
189 err->loss_of_sync_error_count,
190 err->loss_of_signal_error_count,
191 err->primitive_sequence_error_count,
192 err->invalid_transmission_word_error_count,
193 err->crc_error_count);
194 dev_warn(&adapter->ccw_device->dev,
195 "Additional bit error threshold data of the adapter: "
196 "primitive sequence event time-outs = %i, "
197 "elastic buffer overrun errors = %i, "
198 "advertised receive buffer-to-buffer credit = %i, "
199 		 "current receive buffer-to-buffer credit = %i, "
200 "advertised transmit buffer-to-buffer credit = %i, "
201 		 "current transmit buffer-to-buffer credit = %i.\n",
202 err->primitive_sequence_event_timeout_count,
203 err->elastic_buffer_overrun_error_count,
204 err->advertised_receive_b2b_credit,
205 err->current_receive_b2b_credit,
206 err->advertised_transmit_b2b_credit,
207 err->current_transmit_b2b_credit);
253} 208}
254 209
255/* 210static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
256 * function: zfcp_fsf_protstatus_eval 211 struct fsf_link_down_info *link_down)
257 *
258 * purpose: evaluates the QTCB of the finished FSF request
259 * and initiates appropriate actions
260 * (usually calling FSF command specific handlers)
261 *
262 * returns:
263 *
264 * context:
265 *
266 * locks:
267 */
268static int
269zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
270{ 212{
271 int retval = 0; 213 struct zfcp_adapter *adapter = req->adapter;
272 struct zfcp_adapter *adapter = fsf_req->adapter;
273 struct fsf_qtcb *qtcb = fsf_req->qtcb;
274 union fsf_prot_status_qual *prot_status_qual =
275 &qtcb->prefix.prot_status_qual;
276
277 zfcp_hba_dbf_event_fsf_response(fsf_req);
278
279 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
280 ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
281 (unsigned long) fsf_req);
282 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
283 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
284 goto skip_protstatus;
285 }
286 214
287 /* evaluate FSF Protocol Status */ 215 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
288 switch (qtcb->prefix.prot_status) { 216 return;
289 217
290 case FSF_PROT_GOOD: 218 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
291 case FSF_PROT_FSF_STATUS_PRESENTED:
292 break;
293 219
294 case FSF_PROT_QTCB_VERSION_ERROR: 220 if (!link_down)
295 ZFCP_LOG_NORMAL("error: The adapter %s contains " 221 goto out;
296 "microcode of version 0x%x, the device driver "
297 "only supports 0x%x. Aborting.\n",
298 zfcp_get_busid_by_adapter(adapter),
299 prot_status_qual->version_error.fsf_version,
300 ZFCP_QTCB_VERSION);
301 zfcp_erp_adapter_shutdown(adapter, 0, 117, fsf_req);
302 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
303 break;
304 222
305 case FSF_PROT_SEQ_NUMB_ERROR: 223 switch (link_down->error_code) {
306 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between " 224 case FSF_PSQ_LINK_NO_LIGHT:
307 "driver (0x%x) and adapter %s (0x%x). " 225 dev_warn(&req->adapter->ccw_device->dev,
308 "Restarting all operations on this adapter.\n", 226 "The local link is down: no light detected.\n");
309 qtcb->prefix.req_seq_no,
310 zfcp_get_busid_by_adapter(adapter),
311 prot_status_qual->sequence_error.exp_req_seq_no);
312 zfcp_erp_adapter_reopen(adapter, 0, 98, fsf_req);
313 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
314 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
315 break; 227 break;
316 228 case FSF_PSQ_LINK_WRAP_PLUG:
317 case FSF_PROT_UNSUPP_QTCB_TYPE: 229 dev_warn(&req->adapter->ccw_device->dev,
318 ZFCP_LOG_NORMAL("error: Packet header type used by the " 230 "The local link is down: wrap plug detected.\n");
319 "device driver is incompatible with "
320 "that used on adapter %s. "
321 "Stopping all operations on this adapter.\n",
322 zfcp_get_busid_by_adapter(adapter));
323 zfcp_erp_adapter_shutdown(adapter, 0, 118, fsf_req);
324 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
325 break; 231 break;
326 232 case FSF_PSQ_LINK_NO_FCP:
327 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 233 dev_warn(&req->adapter->ccw_device->dev,
328 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 234 "The local link is down: "
329 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 235 "adjacent node on link does not support FCP.\n");
330 &(adapter->status));
331 break; 236 break;
332 237 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
333 case FSF_PROT_DUPLICATE_REQUEST_ID: 238 dev_warn(&req->adapter->ccw_device->dev,
334 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx " 239 "The local link is down: "
335 "to the adapter %s is ambiguous. " 240 "firmware update in progress.\n");
336 "Stopping all operations on this adapter.\n",
337 *(unsigned long long*)
338 (&qtcb->bottom.support.req_handle),
339 zfcp_get_busid_by_adapter(adapter));
340 zfcp_erp_adapter_shutdown(adapter, 0, 78, fsf_req);
341 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
342 break; 241 break;
343 242 case FSF_PSQ_LINK_INVALID_WWPN:
344 case FSF_PROT_LINK_DOWN: 243 dev_warn(&req->adapter->ccw_device->dev,
345 zfcp_fsf_link_down_info_eval(fsf_req, 37, 244 "The local link is down: "
346 &prot_status_qual->link_down_info); 245 "duplicate or invalid WWPN detected.\n");
347 /* FIXME: reopening adapter now? better wait for link up */
348 zfcp_erp_adapter_reopen(adapter, 0, 79, fsf_req);
349 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
350 break; 246 break;
351 247 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
352 case FSF_PROT_REEST_QUEUE: 248 dev_warn(&req->adapter->ccw_device->dev,
353 ZFCP_LOG_NORMAL("The local link to adapter with " 249 "The local link is down: "
354 "%s was re-plugged. " 250 "no support for NPIV by Fabric.\n");
355 "Re-starting operations on this adapter.\n",
356 zfcp_get_busid_by_adapter(adapter));
357 /* All ports should be marked as ready to run again */
358 zfcp_erp_modify_adapter_status(adapter, 28, NULL,
359 ZFCP_STATUS_COMMON_RUNNING,
360 ZFCP_SET);
361 zfcp_erp_adapter_reopen(adapter,
362 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
363 | ZFCP_STATUS_COMMON_ERP_FAILED,
364 99, fsf_req);
365 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
366 break; 251 break;
367 252 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
368 case FSF_PROT_ERROR_STATE: 253 dev_warn(&req->adapter->ccw_device->dev,
369 ZFCP_LOG_NORMAL("error: The adapter %s " 254 "The local link is down: "
370 "has entered the error state. " 255 "out of resource in FCP daughtercard.\n");
371 "Restarting all operations on this " 256 break;
372 "adapter.\n", 257 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
373 zfcp_get_busid_by_adapter(adapter)); 258 dev_warn(&req->adapter->ccw_device->dev,
374 zfcp_erp_adapter_reopen(adapter, 0, 100, fsf_req); 259 "The local link is down: "
375 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; 260 "out of resource in Fabric.\n");
376 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 261 break;
262 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
263 dev_warn(&req->adapter->ccw_device->dev,
264 "The local link is down: "
265 "unable to login to Fabric.\n");
266 break;
267 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
268 dev_warn(&req->adapter->ccw_device->dev,
269 "WWPN assignment file corrupted on adapter.\n");
270 break;
271 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
272 dev_warn(&req->adapter->ccw_device->dev,
273 "Mode table corrupted on adapter.\n");
274 break;
275 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
276 dev_warn(&req->adapter->ccw_device->dev,
277 "No WWPN for assignment table on adapter.\n");
377 break; 278 break;
378
379 default: 279 default:
380 ZFCP_LOG_NORMAL("bug: Transfer protocol status information " 280 dev_warn(&req->adapter->ccw_device->dev,
381 "provided by the adapter %s " 281 "The local link to adapter is down.\n");
382 "is not compatible with the device driver. "
383 "Stopping all operations on this adapter. "
384 "(debug info 0x%x).\n",
385 zfcp_get_busid_by_adapter(adapter),
386 qtcb->prefix.prot_status);
387 zfcp_erp_adapter_shutdown(adapter, 0, 119, fsf_req);
388 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
389 } 282 }
283out:
284 zfcp_erp_adapter_failed(adapter, id, req);
285}
390 286
391 skip_protstatus: 287static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
392 /* 288{
393 * always call specific handlers to give them a chance to do 289 struct zfcp_adapter *adapter = req->adapter;
394 * something meaningful even in error cases 290 struct fsf_status_read_buffer *sr_buf = req->data;
395 */ 291 struct fsf_link_down_info *ldi =
396 zfcp_fsf_fsfstatus_eval(fsf_req); 292 (struct fsf_link_down_info *) &sr_buf->payload;
397 return retval; 293
294 switch (sr_buf->status_subtype) {
295 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
296 dev_warn(&adapter->ccw_device->dev,
297 "Physical link is down.\n");
298 zfcp_fsf_link_down_info_eval(req, 38, ldi);
299 break;
300 case FSF_STATUS_READ_SUB_FDISC_FAILED:
301 dev_warn(&adapter->ccw_device->dev,
302 "Local link is down "
303 "due to failed FDISC login.\n");
304 zfcp_fsf_link_down_info_eval(req, 39, ldi);
305 break;
306 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
307 dev_warn(&adapter->ccw_device->dev,
308 "Local link is down "
309 "due to firmware update on adapter.\n");
310 zfcp_fsf_link_down_info_eval(req, 40, NULL);
311 	}
398} 312}
399 313
400/* 314static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
401 * function: zfcp_fsf_fsfstatus_eval
402 *
403 * purpose: evaluates FSF status of completed FSF request
404 * and acts accordingly
405 *
406 * returns:
407 */
408static int
409zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
410{ 315{
411 int retval = 0; 316 struct zfcp_adapter *adapter = req->adapter;
317 struct fsf_status_read_buffer *sr_buf = req->data;
412 318
413 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 319 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
414 goto skip_fsfstatus; 320 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf);
321 mempool_free(sr_buf, adapter->pool.data_status_read);
322 zfcp_fsf_req_free(req);
323 return;
415 } 324 }
416 325
417 /* evaluate FSF Status */ 326 zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf);
418 switch (fsf_req->qtcb->header.fsf_status) {
419 case FSF_UNKNOWN_COMMAND:
420 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
421 "not known by the adapter %s "
422 "Stopping all operations on this adapter. "
423 "(debug info 0x%x).\n",
424 zfcp_get_busid_by_adapter(fsf_req->adapter),
425 fsf_req->qtcb->header.fsf_command);
426 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 120, fsf_req);
427 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
428 break;
429 327
430 case FSF_FCP_RSP_AVAILABLE: 328 switch (sr_buf->status_type) {
431 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the " 329 case FSF_STATUS_READ_PORT_CLOSED:
432 "SCSI stack.\n"); 330 zfcp_fsf_status_read_port_closed(req);
433 break; 331 break;
434 332 case FSF_STATUS_READ_INCOMING_ELS:
435 case FSF_ADAPTER_STATUS_AVAILABLE: 333 zfcp_fc_incoming_els(req);
436 zfcp_fsf_fsfstatus_qual_eval(fsf_req); 334 break;
335 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
336 break;
337 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
338 zfcp_fsf_bit_error_threshold(req);
339 break;
340 case FSF_STATUS_READ_LINK_DOWN:
341 zfcp_fsf_status_read_link_down(req);
342 break;
343 case FSF_STATUS_READ_LINK_UP:
344 dev_info(&adapter->ccw_device->dev,
345 "Local link was replugged.\n");
346 /* All ports should be marked as ready to run again */
347 zfcp_erp_modify_adapter_status(adapter, 30, NULL,
348 ZFCP_STATUS_COMMON_RUNNING,
349 ZFCP_SET);
350 zfcp_erp_adapter_reopen(adapter,
351 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
352 ZFCP_STATUS_COMMON_ERP_FAILED,
353 102, req);
354 break;
355 case FSF_STATUS_READ_NOTIFICATION_LOST:
356 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
357 zfcp_erp_adapter_access_changed(adapter, 135, req);
358 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
359 schedule_work(&adapter->scan_work);
360 break;
361 case FSF_STATUS_READ_CFDC_UPDATED:
362 zfcp_erp_adapter_access_changed(adapter, 136, req);
363 break;
364 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
365 adapter->adapter_features = sr_buf->payload.word[0];
437 break; 366 break;
438 } 367 }
439 368
440 skip_fsfstatus: 369 mempool_free(sr_buf, adapter->pool.data_status_read);
441 /* 370 zfcp_fsf_req_free(req);
442 * always call specific handlers to give them a chance to do
443 * something meaningful even in error cases
444 */
445 zfcp_fsf_req_dispatch(fsf_req);
446 371
447 return retval; 372 atomic_inc(&adapter->stat_miss);
373 schedule_work(&adapter->stat_work);
448} 374}
449 375
450/* 376static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
451 * function: zfcp_fsf_fsfstatus_qual_eval
452 *
453 * purpose: evaluates FSF status-qualifier of completed FSF request
454 * and acts accordingly
455 *
456 * returns:
457 */
458static int
459zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
460{ 377{
461 int retval = 0; 378 switch (req->qtcb->header.fsf_status_qual.word[0]) {
462
463 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
464 case FSF_SQ_FCP_RSP_AVAILABLE: 379 case FSF_SQ_FCP_RSP_AVAILABLE:
465 break;
466 case FSF_SQ_RETRY_IF_POSSIBLE:
467 /* The SCSI-stack may now issue retries or escalate */
468 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
469 break;
470 case FSF_SQ_COMMAND_ABORTED:
471 /* Carry the aborted state on to upper layer */
472 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
473 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
474 break;
475 case FSF_SQ_NO_RECOM:
476 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a "
477 "problem on the adapter %s "
478 "Stopping all operations on this adapter. ",
479 zfcp_get_busid_by_adapter(fsf_req->adapter));
480 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 121, fsf_req);
481 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
482 break;
483 case FSF_SQ_ULP_PROGRAMMING_ERROR:
484 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
485 "(adapter %s)\n",
486 zfcp_get_busid_by_adapter(fsf_req->adapter));
487 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
488 break;
489 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 380 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
490 case FSF_SQ_NO_RETRY_POSSIBLE: 381 case FSF_SQ_NO_RETRY_POSSIBLE:
491 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 382 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
492 /* dealt with in the respective functions */ 383 return;
384 case FSF_SQ_COMMAND_ABORTED:
385 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
493 break; 386 break;
494 default: 387 case FSF_SQ_NO_RECOM:
495 ZFCP_LOG_NORMAL("bug: Additional status info could " 388 dev_err(&req->adapter->ccw_device->dev,
496 "not be interpreted properly.\n"); 389 "No recommendation could be given for a "
497 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, 390 "problem on the adapter.\n");
498 (char *) &fsf_req->qtcb->header.fsf_status_qual, 391 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
499 sizeof (union fsf_status_qual));
500 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
501 break; 392 break;
502 } 393 }
503 394 	/* all non-return cases set FSFREQ_ERROR */
504 return retval; 395 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
505} 396}
506 397
-/**
- * zfcp_fsf_link_down_info_eval - evaluate link down information block
- */
-static void
-zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *fsf_req, u8 id,
-			     struct fsf_link_down_info *link_down)
-{
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-
-	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
-			     &adapter->status))
-		return;
-
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
-
-	if (link_down == NULL)
-		goto out;
-
-	switch (link_down->error_code) {
-	case FSF_PSQ_LINK_NO_LIGHT:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(no light detected)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_WRAP_PLUG:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(wrap plug detected)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_NO_FCP:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(adjacent node on link does not support FCP)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(firmware update in progress)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_INVALID_WWPN:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(duplicate or invalid WWPN detected)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(no support for NPIV by Fabric)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(out of resource in FCP daughtercard)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(out of resource in Fabric)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(unable to Fabric login)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
-		ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
-		ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
-		ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n",
-				zfcp_get_busid_by_adapter(adapter));
-		break;
-	default:
-		ZFCP_LOG_NORMAL("The local link to adapter %s is down "
-				"(warning: unknown reason code %d)\n",
-				zfcp_get_busid_by_adapter(adapter),
-				link_down->error_code);
-	}
-
-	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
-		ZFCP_LOG_DEBUG("Debug information to link down: "
-			       "primary_status=0x%02x "
-			       "ioerr_code=0x%02x "
-			       "action_code=0x%02x "
-			       "reason_code=0x%02x "
-			       "explanation_code=0x%02x "
-			       "vendor_specific_code=0x%02x\n",
-			       link_down->primary_status,
-			       link_down->ioerr_code,
-			       link_down->action_code,
-			       link_down->reason_code,
-			       link_down->explanation_code,
-			       link_down->vendor_specific_code);
-
- out:
-	zfcp_erp_adapter_failed(adapter, id, fsf_req);
-}
+static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
+{
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+		return;
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_UNKNOWN_COMMAND:
+		dev_err(&req->adapter->ccw_device->dev,
+			"Command issued by the device driver (0x%x) is "
+			"not known by the adapter.\n",
+			req->qtcb->header.fsf_command);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		zfcp_fsf_fsfstatus_qual_eval(req);
+		break;
+	}
+}
+
+static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb *qtcb = req->qtcb;
+	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
+
+	zfcp_hba_dbf_event_fsf_response(req);
+
+	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
+		return;
+	}
+
+	switch (qtcb->prefix.prot_status) {
+	case FSF_PROT_GOOD:
+	case FSF_PROT_FSF_STATUS_PRESENTED:
+		return;
+	case FSF_PROT_QTCB_VERSION_ERROR:
+		dev_err(&adapter->ccw_device->dev,
+			"The QTCB version requested by zfcp (0x%x) is not "
+			"supported by the FCP adapter (lowest supported "
+			"0x%x, highest supported 0x%x).\n",
+			FSF_QTCB_CURRENT_VERSION, psq->word[0],
+			psq->word[1]);
+		zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
+		break;
+	case FSF_PROT_ERROR_STATE:
+	case FSF_PROT_SEQ_NUMB_ERROR:
+		zfcp_erp_adapter_reopen(adapter, 0, 98, req);
+		req->status |= ZFCP_STATUS_FSFREQ_RETRY;
+		break;
+	case FSF_PROT_UNSUPP_QTCB_TYPE:
+		dev_err(&adapter->ccw_device->dev,
+			"Packet header type used by the device driver is "
+			"incompatible with that used on the adapter.\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
+		break;
+	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				&adapter->status);
+		break;
+	case FSF_PROT_DUPLICATE_REQUEST_ID:
+		dev_err(&adapter->ccw_device->dev,
+			"The request identifier 0x%Lx is ambiguous.\n",
+			(unsigned long long)qtcb->bottom.support.req_handle);
+		zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
+		break;
+	case FSF_PROT_LINK_DOWN:
+		zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info);
+		/* FIXME: reopening adapter now? better wait for link up */
+		zfcp_erp_adapter_reopen(adapter, 0, 79, req);
+		break;
+	case FSF_PROT_REEST_QUEUE:
+		/* All ports should be marked as ready to run again */
+		zfcp_erp_modify_adapter_status(adapter, 28, NULL,
+					       ZFCP_STATUS_COMMON_RUNNING,
+					       ZFCP_SET);
+		zfcp_erp_adapter_reopen(adapter,
+					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+					ZFCP_STATUS_COMMON_ERP_FAILED, 99, req);
+		break;
+	default:
+		dev_err(&adapter->ccw_device->dev,
+			"Transfer protocol status information "
+			"provided by the adapter (0x%x) "
+			"is not compatible with the device driver.\n",
+			qtcb->prefix.prot_status);
+		zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
+	}
+	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}

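zfcp_fsf_protstatus_eval updates adapter state bits with atomic_set_mask(), the s390 primitive that ORs a mask into a word without taking a lock. The portable C11 equivalent is atomic_fetch_or(); a small sketch under that assumption (flag values are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define ADAPTER_HOST_CON_INIT  0x010
#define ADAPTER_LINK_UNPLUGGED 0x020

static atomic_uint adapter_status;

int main(void)
{
	/* equivalent of atomic_set_mask(flag, &adapter->status) */
	atomic_fetch_or(&adapter_status, ADAPTER_HOST_CON_INIT);

	/* equivalent of atomic_test_mask(flag, &adapter->status) */
	if (atomic_load(&adapter_status) & ADAPTER_HOST_CON_INIT)
		printf("connection initializing\n");
	return 0;
}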
-/*
- * function:    zfcp_fsf_req_dispatch
- *
- * purpose:	calls the appropriate command specific handler
- *
- * returns:
- */
-static int
-zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
-{
-	struct zfcp_erp_action *erp_action = fsf_req->erp_action;
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	int retval = 0;
-
-	switch (fsf_req->fsf_command) {
-
-	case FSF_QTCB_FCP_CMND:
-		zfcp_fsf_send_fcp_command_handler(fsf_req);
-		break;
-
+/**
+ * zfcp_fsf_req_complete - process completion of a FSF request
+ * @fsf_req: The FSF request that has been completed.
+ *
+ * When a request has been completed either from the FCP adapter,
+ * or it has been dismissed due to a queue shutdown, this function
+ * is called to process the completion status and trigger further
+ * events related to the FSF request.
+ */
+void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
+{
+	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+		zfcp_fsf_status_read_handler(req);
+		return;
+	}
+
+	del_timer(&req->timer);
+	zfcp_fsf_protstatus_eval(req);
+	zfcp_fsf_fsfstatus_eval(req);
+	req->handler(req);
+
+	if (req->erp_action)
+		zfcp_erp_notify(req->erp_action, 0);
+	req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
+
+	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
+		zfcp_fsf_req_free(req);
+	else
+	/* notify initiator waiting for the requests completion */
+	/*
+	 * FIXME: Race! We must not access fsf_req here as it might have been
+	 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
+	 * flag. It's an improbable case. But, we have the same paranoia for
+	 * the cleanup flag already.
+	 * Might better be handled using complete()?
+	 * (setting the flag and doing wakeup ought to be atomic
+	 * with regard to checking the flag as long as waitqueue is
+	 * part of the to be released structure)
+	 */
+		wake_up(&req->completion_wq);
+}

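The FIXME in zfcp_fsf_req_complete describes exactly the guarantee that a completion object gives: the flag flip and the wakeup happen under one lock, so the waiter cannot free the structure while the completer still touches it. A minimal pthread sketch of that shape (userspace stand-in for complete()/wait_for_completion(), not the kernel API itself):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int done;
};

static struct completion c = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

static void complete(struct completion *x)
{
	pthread_mutex_lock(&x->lock);
	x->done = 1;			/* flag and wakeup are atomic ... */
	pthread_cond_signal(&x->wait);
	pthread_mutex_unlock(&x->lock);	/* ... w.r.t. the waiter's check */
}

static void wait_for_completion(struct completion *x)
{
	pthread_mutex_lock(&x->lock);
	while (!x->done)
		pthread_cond_wait(&x->wait, &x->lock);
	pthread_mutex_unlock(&x->lock);
}

static void *worker(void *arg)
{
	(void)arg;
	complete(&c);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	wait_for_completion(&c);	/* safe to release the object now */
	pthread_join(t, NULL);
	printf("completed\n");
	return 0;
}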
-	case FSF_QTCB_ABORT_FCP_CMND:
-		zfcp_fsf_abort_fcp_command_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_SEND_GENERIC:
-		zfcp_fsf_send_ct_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_OPEN_PORT_WITH_DID:
-		zfcp_fsf_open_port_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_OPEN_LUN:
-		zfcp_fsf_open_unit_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_CLOSE_LUN:
-		zfcp_fsf_close_unit_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_CLOSE_PORT:
-		zfcp_fsf_close_port_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_CLOSE_PHYSICAL_PORT:
-		zfcp_fsf_close_physical_port_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_EXCHANGE_CONFIG_DATA:
-		zfcp_fsf_exchange_config_data_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_EXCHANGE_PORT_DATA:
-		zfcp_fsf_exchange_port_data_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_SEND_ELS:
-		zfcp_fsf_send_els_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
-		zfcp_fsf_control_file_handler(fsf_req);
-		break;
-
-	case FSF_QTCB_UPLOAD_CONTROL_FILE:
-		zfcp_fsf_control_file_handler(fsf_req);
-		break;
-
-	default:
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
-				"not supported by the adapter %s\n",
-				zfcp_get_busid_by_adapter(adapter));
-		if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
-			ZFCP_LOG_NORMAL
-			    ("bug: Command issued by the device driver differs "
-			     "from the command returned by the adapter %s "
-			     "(debug info 0x%x, 0x%x).\n",
-			     zfcp_get_busid_by_adapter(adapter),
-			     fsf_req->fsf_command,
-			     fsf_req->qtcb->header.fsf_command);
-	}
-
-	if (!erp_action)
-		return retval;
-
-	zfcp_erp_async_handler(erp_action, 0);
-
-	return retval;
-}
+static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb_bottom_config *bottom;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	bottom = &req->qtcb->bottom.config;
+
+	if (req->data)
+		memcpy(req->data, bottom, sizeof(*bottom));
+
+	fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
+	fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
+	fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
+	fc_host_speed(shost) = bottom->fc_link_speed;
+	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+	adapter->hydra_version = bottom->adapter_type;
+	adapter->timer_ticks = bottom->timer_interval;
+
+	if (fc_host_permanent_port_name(shost) == -1)
+		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+
+	switch (bottom->fc_topology) {
+	case FSF_TOPO_P2P:
+		adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
+		adapter->peer_wwpn = bottom->plogi_payload.wwpn;
+		adapter->peer_wwnn = bottom->plogi_payload.wwnn;
+		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+		if (req->erp_action)
+			dev_info(&adapter->ccw_device->dev,
+				 "Point-to-Point fibrechannel "
+				 "configuration detected.\n");
+		break;
+	case FSF_TOPO_FABRIC:
+		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+		if (req->erp_action)
+			dev_info(&adapter->ccw_device->dev,
+				 "Switched fabric fibrechannel "
+				 "network detected.\n");
+		break;
+	case FSF_TOPO_AL:
+		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+		dev_err(&adapter->ccw_device->dev,
+			"Unsupported arbitrated loop fibrechannel "
+			"topology detected, shutting down "
+			"adapter.\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
+		return -EIO;
+	default:
+		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+		dev_err(&adapter->ccw_device->dev,
+			"The fibrechannel topology reported by the"
+			" adapter is not known by the zfcp driver,"
+			" shutting down adapter.\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, 128, req);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb *qtcb = req->qtcb;
+	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	adapter->fsf_lic_version = bottom->lic_version;
+	adapter->adapter_features = bottom->adapter_features;
+	adapter->connection_features = bottom->connection_features;
+	adapter->peer_wwpn = 0;
+	adapter->peer_wwnn = 0;
+	adapter->peer_d_id = 0;
+
+	switch (qtcb->header.fsf_status) {
+	case FSF_GOOD:
+		if (zfcp_fsf_exchange_config_evaluate(req))
+			return;
+
+		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
+			dev_err(&adapter->ccw_device->dev,
+				"Maximum QTCB size (%d bytes) allowed by "
+				"the adapter is lower than the minimum "
+				"required by the driver (%ld bytes).\n",
+				bottom->max_qtcb_size,
+				sizeof(struct fsf_qtcb));
+			zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
+			return;
+		}
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+				&adapter->status);
+		break;
+	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+		fc_host_node_name(shost) = 0;
+		fc_host_port_name(shost) = 0;
+		fc_host_port_id(shost) = 0;
+		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+		adapter->hydra_version = 0;
+
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+				&adapter->status);
+
+		zfcp_fsf_link_down_info_eval(req, 42,
+			&qtcb->header.fsf_status_qual.link_down_info);
+		break;
+	default:
+		zfcp_erp_adapter_shutdown(adapter, 0, 130, req);
+		return;
+	}
+
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+		adapter->hardware_version = bottom->hardware_version;
+		memcpy(fc_host_serial_number(shost), bottom->serial_number,
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+		EBCASC(fc_host_serial_number(shost),
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+	}
+
+	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
+		dev_err(&adapter->ccw_device->dev,
+			"The adapter only supports newer control block "
+			"versions, try updated device driver.\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
+		return;
+	}
+	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
+		dev_err(&adapter->ccw_device->dev,
+			"The adapter only supports older control block "
+			"versions, consider a microcode upgrade.\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
+	}
+}

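zfcp_fsf_exchange_config_data_handler accepts the adapter only when FSF_QTCB_CURRENT_VERSION falls inside the [low_qtcb_version, high_qtcb_version] window the adapter reports, and shuts the adapter down otherwise. The check reduces to a closed-interval test; a sketch with illustrative values (not the real zfcp constants):

#include <stdio.h>

#define QTCB_CURRENT_VERSION 0x10	/* illustrative, not the zfcp value */

/* Returns 0 when our version lies in the adapter's supported window. */
static int check_qtcb_window(unsigned low, unsigned high)
{
	if (QTCB_CURRENT_VERSION < low)
		return -1;		/* driver too old for this adapter */
	if (QTCB_CURRENT_VERSION > high)
		return 1;		/* adapter microcode too old */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_qtcb_window(0x01, 0x20),	/* in window -> 0 */
	       check_qtcb_window(0x11, 0x20),	/* below low -> -1 */
	       check_qtcb_window(0x01, 0x0f));	/* above high -> 1 */
	return 0;
}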
-/*
- * function:    zfcp_fsf_status_read
- *
- * purpose:	initiates a Status Read command at the specified adapter
- *
- * returns:
- */
-int
-zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
-{
-	struct zfcp_fsf_req *fsf_req;
-	struct fsf_status_read_buffer *status_buffer;
-	unsigned long lock_flags;
-	volatile struct qdio_buffer_element *sbale;
-	int retval = 0;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
-				     req_flags | ZFCP_REQ_NO_QTCB,
-				     adapter->pool.fsf_req_status_read,
-				     &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Could not create unsolicited status "
-			      "buffer for adapter %s.\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		goto failed_req_create;
-	}
-
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
-	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
-	fsf_req->sbale_curr = 2;
-
-	status_buffer =
-		mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
-	if (!status_buffer) {
-		ZFCP_LOG_NORMAL("bug: could not get some buffer\n");
-		goto failed_buf;
-	}
-	memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
-	fsf_req->data = (unsigned long) status_buffer;
-
-	/* insert pointer to respective buffer */
-	sbale = zfcp_qdio_sbale_curr(fsf_req);
-	sbale->addr = (void *) status_buffer;
-	sbale->length = sizeof(struct fsf_status_read_buffer);
-
-	retval = zfcp_fsf_req_send(fsf_req);
-	if (retval) {
-		ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status "
-			       "environment.\n");
-		goto failed_req_send;
-	}
-
-	ZFCP_LOG_TRACE("Status Read request initiated (adapter%s)\n",
-		       zfcp_get_busid_by_adapter(adapter));
-	goto out;
-
- failed_req_send:
-	mempool_free(status_buffer, adapter->pool.data_status_read);
-
- failed_buf:
-	zfcp_fsf_req_free(fsf_req);
- failed_req_create:
-	zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
- out:
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-	return retval;
-}
+static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	if (req->data)
+		memcpy(req->data, bottom, sizeof(*bottom));
+
+	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+		fc_host_permanent_port_name(shost) = bottom->wwpn;
+	else
+		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+	fc_host_supported_speeds(shost) = bottom->supported_speed;
+}
+
+static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb *qtcb = req->qtcb;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	switch (qtcb->header.fsf_status) {
+	case FSF_GOOD:
+		zfcp_fsf_exchange_port_evaluate(req);
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+		break;
+	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+		zfcp_fsf_exchange_port_evaluate(req);
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
+		zfcp_fsf_link_down_info_eval(req, 43,
+			&qtcb->header.fsf_status_qual.link_down_info);
+		break;
+	}
+}
+
+static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue)
+{
+	spin_lock(&queue->lock);
+	if (atomic_read(&queue->count))
+		return 1;
+	spin_unlock(&queue->lock);
+	return 0;
+}
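zfcp_fsf_sbal_check returns with the queue lock still held on success and dropped again on failure, so the caller owns the lock exactly when there is work to do. This conditional-locking contract is easy to get wrong; a userspace sketch of the same shape with a pthread mutex (names illustrative):

#include <pthread.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int q_count;

/* Non-zero: the lock is HELD by the caller; zero: it has been released.
 * Mirrors the zfcp_fsf_sbal_check() contract above. */
static int queue_has_space(void)
{
	pthread_mutex_lock(&q_lock);
	if (q_count > 0)
		return 1;	/* caller must unlock after consuming */
	pthread_mutex_unlock(&q_lock);
	return 0;
}

static void consume_one(void)
{
	if (queue_has_space()) {
		q_count--;
		pthread_mutex_unlock(&q_lock);
	}
}

int main(void)
{
	q_count = 1;
	consume_one();
	return q_count;		/* 0 */
}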
-static int
-zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
-{
-	struct fsf_status_read_buffer *status_buffer;
-	struct zfcp_adapter *adapter;
-	struct zfcp_port *port;
-	unsigned long flags;
-
-	status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
-	adapter = fsf_req->adapter;
-
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	list_for_each_entry(port, &adapter->port_list_head, list)
-		if (port->d_id == (status_buffer->d_id & ZFCP_DID_MASK))
-			break;
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
-
-	if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) {
-		ZFCP_LOG_NORMAL("bug: Reopen port indication received for "
-				"nonexisting port with d_id 0x%06x on "
-				"adapter %s. Ignored.\n",
-				status_buffer->d_id & ZFCP_DID_MASK,
-				zfcp_get_busid_by_adapter(adapter));
-		goto out;
-	}
-
-	switch (status_buffer->status_subtype) {
-
-	case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
-		zfcp_erp_port_reopen(port, 0, 101, fsf_req);
-		break;
-
-	case FSF_STATUS_READ_SUB_ERROR_PORT:
-		zfcp_erp_port_shutdown(port, 0, 122, fsf_req);
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("bug: Undefined status subtype received "
-				"for a reopen indication on port with "
-				"d_id 0x%06x on the adapter %s. "
-				"Ignored. (debug info 0x%x)\n",
-				status_buffer->d_id,
-				zfcp_get_busid_by_adapter(adapter),
-				status_buffer->status_subtype);
-	}
- out:
-	return 0;
-}
+static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
+{
+	long ret;
+	struct zfcp_qdio_queue *req_q = &adapter->req_q;
+
+	spin_unlock(&req_q->lock);
+	ret = wait_event_interruptible_timeout(adapter->request_wq,
+					zfcp_fsf_sbal_check(req_q), 5 * HZ);
+	if (ret > 0)
+		return 0;
+
+	spin_lock(&req_q->lock);
+	return -EIO;
+}
+
+static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool)
+{
+	struct zfcp_fsf_req *req;
+	req = mempool_alloc(pool, GFP_ATOMIC);
+	if (!req)
+		return NULL;
+	memset(req, 0, sizeof(*req));
+	return req;
+}
+
+static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool)
+{
+	struct zfcp_fsf_req_qtcb *qtcb;
+
+	if (likely(pool))
+		qtcb = mempool_alloc(pool, GFP_ATOMIC);
+	else
+		qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
+					GFP_ATOMIC);
+	if (unlikely(!qtcb))
+		return NULL;
+
+	memset(qtcb, 0, sizeof(*qtcb));
+	qtcb->fsf_req.qtcb = &qtcb->qtcb;
+	qtcb->fsf_req.pool = pool;
+
+	return &qtcb->fsf_req;
+}

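zfcp_fsf_alloc_qtcb takes the object from the emergency mempool when one was handed in and from the general slab cache otherwise, and records the origin in req->pool so the free path can return it to the right place. A malloc-based sketch of that allocate-with-origin pattern (a one-slot stand-in for a mempool, not the kernel mempool semantics):

#include <stdlib.h>
#include <string.h>

struct pool;

struct req {
	struct pool *pool;	/* where to give the object back, NULL = heap */
	int payload;
};

struct pool {
	struct req slot;	/* single reserved emergency object */
	int in_use;
};

static struct req *req_alloc(struct pool *pool)
{
	struct req *req;

	if (pool && !pool->in_use) {		/* reserved emergency object */
		pool->in_use = 1;
		req = &pool->slot;
	} else {
		req = malloc(sizeof(*req));	/* general allocator */
		if (!req)
			return NULL;
		pool = NULL;
	}
	memset(req, 0, sizeof(*req));
	req->pool = pool;		/* remembered for the free path */
	return req;
}

static void req_free(struct req *req)
{
	if (req->pool)
		req->pool->in_use = 0;
	else
		free(req);
}

int main(void)
{
	struct pool p = { { 0, 0 }, 0 };
	struct req *a = req_alloc(&p);	/* comes from the pool slot */
	struct req *b = req_alloc(&p);	/* pool busy, falls back to malloc */

	req_free(a);
	req_free(b);
	return 0;
}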
-/*
- * function:    zfcp_fsf_status_read_handler
- *
- * purpose:	is called for finished Open Port command
- *
- * returns:
- */
-static int
-zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
-{
-	int retval = 0;
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct fsf_status_read_buffer *status_buffer =
-		(struct fsf_status_read_buffer *) fsf_req->data;
-	struct fsf_bit_error_payload *fsf_bit_error;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-		zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer);
-		mempool_free(status_buffer, adapter->pool.data_status_read);
-		zfcp_fsf_req_free(fsf_req);
-		goto out;
-	}
-
-	zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer);
-
-	switch (status_buffer->status_type) {
-
-	case FSF_STATUS_READ_PORT_CLOSED:
-		zfcp_fsf_status_read_port_closed(fsf_req);
-		break;
-
-	case FSF_STATUS_READ_INCOMING_ELS:
-		zfcp_fsf_incoming_els(fsf_req);
-		break;
-
-	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
-		ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		break;
-
-	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-		fsf_bit_error = (struct fsf_bit_error_payload *)
-			status_buffer->payload;
-		ZFCP_LOG_NORMAL("Warning: bit error threshold data "
-				"received (adapter %s, "
-				"link failures = %i, loss of sync errors = %i, "
-				"loss of signal errors = %i, "
-				"primitive sequence errors = %i, "
-				"invalid transmission word errors = %i, "
-				"CRC errors = %i)\n",
-				zfcp_get_busid_by_adapter(adapter),
-				fsf_bit_error->link_failure_error_count,
-				fsf_bit_error->loss_of_sync_error_count,
-				fsf_bit_error->loss_of_signal_error_count,
-				fsf_bit_error->primitive_sequence_error_count,
-				fsf_bit_error->invalid_transmission_word_error_count,
-				fsf_bit_error->crc_error_count);
-		ZFCP_LOG_INFO("Additional bit error threshold data "
-			      "(adapter %s, "
-			      "primitive sequence event time-outs = %i, "
-			      "elastic buffer overrun errors = %i, "
-			      "advertised receive buffer-to-buffer credit = %i, "
-			      "current receive buffer-to-buffer credit = %i, "
-			      "advertised transmit buffer-to-buffer credit = %i, "
-			      "current transmit buffer-to-buffer credit = %i)\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      fsf_bit_error->primitive_sequence_event_timeout_count,
-			      fsf_bit_error->elastic_buffer_overrun_error_count,
-			      fsf_bit_error->advertised_receive_b2b_credit,
-			      fsf_bit_error->current_receive_b2b_credit,
-			      fsf_bit_error->advertised_transmit_b2b_credit,
-			      fsf_bit_error->current_transmit_b2b_credit);
-		break;
-
-	case FSF_STATUS_READ_LINK_DOWN:
-		switch (status_buffer->status_subtype) {
-		case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
-			ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
-				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_fsf_link_down_info_eval(fsf_req, 38,
-				(struct fsf_link_down_info *)
-				&status_buffer->payload);
-			break;
-		case FSF_STATUS_READ_SUB_FDISC_FAILED:
-			ZFCP_LOG_INFO("Local link to adapter %s is down "
-				      "due to failed FDISC login\n",
-				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_fsf_link_down_info_eval(fsf_req, 39,
-				(struct fsf_link_down_info *)
-				&status_buffer->payload);
-			break;
-		case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
-			ZFCP_LOG_INFO("Local link to adapter %s is down "
-				      "due to firmware update on adapter\n",
-				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_fsf_link_down_info_eval(fsf_req, 40, NULL);
-			break;
-		default:
-			ZFCP_LOG_INFO("Local link to adapter %s is down "
-				      "due to unknown reason\n",
-				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_fsf_link_down_info_eval(fsf_req, 41, NULL);
-		};
-		break;
-
-	case FSF_STATUS_READ_LINK_UP:
-		ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. "
-				"Restarting operations on this adapter\n",
-				zfcp_get_busid_by_adapter(adapter));
-		/* All ports should be marked as ready to run again */
-		zfcp_erp_modify_adapter_status(adapter, 30, NULL,
-					       ZFCP_STATUS_COMMON_RUNNING,
-					       ZFCP_SET);
-		zfcp_erp_adapter_reopen(adapter,
-					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
-					| ZFCP_STATUS_COMMON_ERP_FAILED,
-					102, fsf_req);
-		break;
-
-	case FSF_STATUS_READ_NOTIFICATION_LOST:
-		ZFCP_LOG_NORMAL("Unsolicited status notification(s) lost: "
-				"adapter %s%s%s%s%s%s%s%s%s\n",
-				zfcp_get_busid_by_adapter(adapter),
-				(status_buffer->status_subtype &
-				 FSF_STATUS_READ_SUB_INCOMING_ELS) ?
-				", incoming ELS" : "",
-				(status_buffer->status_subtype &
-				 FSF_STATUS_READ_SUB_SENSE_DATA) ?
-				", sense data" : "",
-				(status_buffer->status_subtype &
-				 FSF_STATUS_READ_SUB_LINK_STATUS) ?
-				", link status change" : "",
-				(status_buffer->status_subtype &
-				 FSF_STATUS_READ_SUB_PORT_CLOSED) ?
-				", port close" : "",
-				(status_buffer->status_subtype &
-				 FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD) ?
-				", bit error exception" : "",
-				(status_buffer->status_subtype &
-				 FSF_STATUS_READ_SUB_ACT_UPDATED) ?
-				", ACT update" : "",
-				(status_buffer->status_subtype &
-				 FSF_STATUS_READ_SUB_ACT_HARDENED) ?
-				", ACT hardening" : "",
-				(status_buffer->status_subtype &
-				 FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT) ?
-				", adapter feature change" : "");
-
-		if (status_buffer->status_subtype &
-		    FSF_STATUS_READ_SUB_ACT_UPDATED)
-			zfcp_erp_adapter_access_changed(adapter, 135, fsf_req);
-		break;
-
-	case FSF_STATUS_READ_CFDC_UPDATED:
-		ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n",
-				zfcp_get_busid_by_adapter(adapter));
-		zfcp_erp_adapter_access_changed(adapter, 136, fsf_req);
-		break;
-
-	case FSF_STATUS_READ_CFDC_HARDENED:
-		switch (status_buffer->status_subtype) {
-		case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
-			ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n",
-					zfcp_get_busid_by_adapter(adapter));
-			break;
-		case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
-			ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied "
-					"to the secondary SE\n",
-					zfcp_get_busid_by_adapter(adapter));
-			break;
-		default:
-			ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n",
-					zfcp_get_busid_by_adapter(adapter));
-		}
-		break;
-
-	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
-		ZFCP_LOG_INFO("List of supported features on adapter %s has "
-			      "been changed from 0x%08X to 0x%08X\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      *(u32*) (status_buffer->payload + 4),
-			      *(u32*) (status_buffer->payload));
-		adapter->adapter_features = *(u32*) status_buffer->payload;
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown "
-				"type was received (debug info 0x%x)\n",
-				status_buffer->status_type);
-		ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
-			       status_buffer);
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) status_buffer,
-			      sizeof (struct fsf_status_read_buffer));
-		break;
-	}
-	mempool_free(status_buffer, adapter->pool.data_status_read);
-	zfcp_fsf_req_free(fsf_req);
-	/*
-	 * recycle buffer and start new request repeat until outbound
-	 * queue is empty or adapter shutdown is requested
-	 */
-	/*
-	 * FIXME(qdio):
-	 * we may wait in the req_create for 5s during shutdown, so
-	 * qdio_cleanup will have to wait at least that long before returning
-	 * with failure to allow us a proper cleanup under all circumstances
-	 */
-	/*
-	 * FIXME:
-	 * allocation failure possible? (Is this code needed?)
-	 */
-	retval = zfcp_fsf_status_read(adapter, 0);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("Failed to create unsolicited status read "
-			      "request for the adapter %s.\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		/* temporary fix to avoid status read buffer shortage */
-		adapter->status_read_failed++;
-		if ((ZFCP_STATUS_READS_RECOM - adapter->status_read_failed)
-		    < ZFCP_STATUS_READ_FAILED_THRESHOLD) {
-			ZFCP_LOG_INFO("restart adapter %s due to status read "
-				      "buffer shortage\n",
-				      zfcp_get_busid_by_adapter(adapter));
-			zfcp_erp_adapter_reopen(adapter, 0, 103, fsf_req);
-		}
-	}
- out:
-	return retval;
-}
+static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
+						u32 fsf_cmd, int req_flags,
+						mempool_t *pool)
+{
+	volatile struct qdio_buffer_element *sbale;
+
+	struct zfcp_fsf_req *req;
+	struct zfcp_qdio_queue *req_q = &adapter->req_q;
+
+	if (req_flags & ZFCP_REQ_NO_QTCB)
+		req = zfcp_fsf_alloc_noqtcb(pool);
+	else
+		req = zfcp_fsf_alloc_qtcb(pool);
+
+	if (unlikely(!req))
+		return ERR_PTR(-EIO);
+
+	if (adapter->req_no == 0)
+		adapter->req_no++;
+
+	INIT_LIST_HEAD(&req->list);
+	init_timer(&req->timer);
+	init_waitqueue_head(&req->completion_wq);
+
+	req->adapter = adapter;
+	req->fsf_command = fsf_cmd;
+	req->req_id = adapter->req_no++;
+	req->sbal_number = 1;
+	req->sbal_first = req_q->first;
+	req->sbal_last = req_q->first;
+	req->sbale_curr = 1;
+
+	sbale = zfcp_qdio_sbale_req(req);
+	sbale[0].addr = (void *) req->req_id;
+	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
+
+	if (likely(req->qtcb)) {
+		req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no;
+		req->qtcb->prefix.req_id = req->req_id;
+		req->qtcb->prefix.ulp_info = 26;
+		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
+		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
+		req->qtcb->header.req_handle = req->req_id;
+		req->qtcb->header.fsf_command = req->fsf_command;
+		req->seq_no = adapter->fsf_req_seq_no;
+		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
+		sbale[1].addr = (void *) req->qtcb;
+		sbale[1].length = sizeof(struct fsf_qtcb);
+	}
+
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
+		zfcp_fsf_req_free(req);
+		return ERR_PTR(-EIO);
+	}
+
+	if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP))
+		req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+
+	return req;
+}
+
+static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_qdio_queue *req_q = &adapter->req_q;
+	int idx;
+
+	/* put allocated FSF request into hash table */
+	spin_lock(&adapter->req_list_lock);
+	idx = zfcp_reqlist_hash(req->req_id);
+	list_add_tail(&req->list, &adapter->req_list[idx]);
+	spin_unlock(&adapter->req_list_lock);
+
+	req->issued = get_clock();
+	if (zfcp_qdio_send(req)) {
+		/* Queues are down..... */
+		del_timer(&req->timer);
+		spin_lock(&adapter->req_list_lock);
+		zfcp_reqlist_remove(adapter, req);
+		spin_unlock(&adapter->req_list_lock);
+		/* undo changes in request queue made for this request */
+		atomic_add(req->sbal_number, &req_q->count);
+		req_q->first -= req->sbal_number;
+		req_q->first += QDIO_MAX_BUFFERS_PER_Q;
+		req_q->first %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
+		zfcp_erp_adapter_reopen(adapter, 0, 116, req);
+		return -EIO;
+	}
+
+	/* Don't increase for unsolicited status */
+	if (req->qtcb)
+		adapter->fsf_req_seq_no++;
+
+	return 0;
+}
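zfcp_fsf_req_send files each request into a hash table keyed by req_id before it goes out on the wire, so the interrupt path can find it again; on a send failure it is unhashed and the queue indices rolled back. A small sketch of such a bucketed lookup table (bucket count and hash function are illustrative; the real zfcp_reqlist_hash is defined elsewhere in the driver):

#include <stdio.h>
#include <stddef.h>

#define BUCKETS 64

struct req {
	unsigned long id;
	struct req *next;
};

static struct req *table[BUCKETS];

static unsigned hash(unsigned long id)
{
	return id % BUCKETS;		/* illustrative hash */
}

static void reqlist_add(struct req *req)
{
	unsigned idx = hash(req->id);

	req->next = table[idx];
	table[idx] = req;
}

static struct req *reqlist_find(unsigned long id)
{
	struct req *r;

	for (r = table[hash(id)]; r; r = r->next)
		if (r->id == id)
			return r;
	return NULL;
}

int main(void)
{
	struct req a = { 42, NULL };

	reqlist_add(&a);
	printf("%s\n", reqlist_find(42) ? "found" : "missing");
	return 0;
}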
-/*
- * function:    zfcp_fsf_abort_fcp_command
- *
- * purpose:	tells FSF to abort a running SCSI command
- *
- * returns:	address of initiated FSF request
- *		NULL - request could not be initiated
- *
- * FIXME(design): should be watched by a timeout !!!
- * FIXME(design) shouldn't this be modified to return an int
- *               also...don't know how though
- */
-struct zfcp_fsf_req *
-zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
-			   struct zfcp_adapter *adapter,
-			   struct zfcp_unit *unit, int req_flags)
-{
-	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req = NULL;
-	unsigned long lock_flags;
-	int retval = 0;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
-				     req_flags, adapter->pool.fsf_req_abort,
-				     &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Failed to create an abort command "
-			      "request for lun 0x%016Lx on port 0x%016Lx "
-			      "on adapter %s.\n",
-			      unit->fcp_lun,
-			      unit->port->wwpn,
-			      zfcp_get_busid_by_adapter(adapter));
-		goto out;
-	}
-
-	if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-				       &unit->status)))
-		goto unit_blocked;
-
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
-	fsf_req->data = (unsigned long) unit;
-
-	/* set handles of unit and its parent port in QTCB */
-	fsf_req->qtcb->header.lun_handle = unit->handle;
-	fsf_req->qtcb->header.port_handle = unit->port->handle;
-
-	/* set handle of request which should be aborted */
-	fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id;
-
-	zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
-	retval = zfcp_fsf_req_send(fsf_req);
-	if (!retval)
-		goto out;
-
- unit_blocked:
-	zfcp_fsf_req_free(fsf_req);
-	fsf_req = NULL;
-
- out:
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-	return fsf_req;
-}
+/**
+ * zfcp_fsf_status_read - send status read request
+ * @adapter: pointer to struct zfcp_adapter
+ * Returns: 0 on success, ERROR otherwise
+ */
+int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
+{
+	struct zfcp_fsf_req *req;
+	struct fsf_status_read_buffer *sr_buf;
+	volatile struct qdio_buffer_element *sbale;
+	int retval = -EIO;
+
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;
+
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
+				  ZFCP_REQ_NO_QTCB,
+				  adapter->pool.fsf_req_status_read);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	sbale = zfcp_qdio_sbale_req(req);
+	sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
+	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
+	req->sbale_curr = 2;
+
+	sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
+	if (!sr_buf) {
+		retval = -ENOMEM;
+		goto failed_buf;
+	}
+	memset(sr_buf, 0, sizeof(*sr_buf));
+	req->data = sr_buf;
+	sbale = zfcp_qdio_sbale_curr(req);
+	sbale->addr = (void *) sr_buf;
+	sbale->length = sizeof(*sr_buf);
+
+	retval = zfcp_fsf_req_send(req);
+	if (retval)
+		goto failed_req_send;
+
+	goto out;
+
+failed_req_send:
+	mempool_free(sr_buf, adapter->pool.data_status_read);
+failed_buf:
+	zfcp_fsf_req_free(req);
+	zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
+out:
+	spin_unlock(&adapter->req_q.lock);
+	return retval;
+}
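zfcp_fsf_status_read unwinds with cascading labels: each later failure jumps to a label that frees exactly what has been set up so far, and every path funnels through out: where the lock is dropped. A compact standalone illustration of that idiom (the two allocations stand in for the request and the status buffer):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *req = NULL, *buf = NULL;
	int ret = -1;

	req = malloc(64);
	if (!req)
		goto out;		/* nothing to undo yet */

	buf = malloc(128);
	if (!buf)
		goto failed_buf;

	if (0 /* pretend the send failed */)
		goto failed_send;

	ret = 0;			/* success: caller now owns both */
	goto out;

failed_send:
	free(buf);			/* undo in reverse order of setup */
failed_buf:
	free(req);
out:
	return ret;
}

int main(void)
{
	printf("%d\n", setup());
	return 0;
}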
-/*
- * function:    zfcp_fsf_abort_fcp_command_handler
- *
- * purpose:	is called for finished Abort FCP Command request
- *
- * returns:
- */
-static int
-zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
-{
-	int retval = -EINVAL;
-	struct zfcp_unit *unit;
-	union fsf_status_qual *fsf_stat_qual =
-		&new_fsf_req->qtcb->header.fsf_status_qual;
-
-	if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		/* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
-		goto skip_fsfstatus;
-	}
-
-	unit = (struct zfcp_unit *) new_fsf_req->data;
-
-	/* evaluate FSF status in QTCB */
-	switch (new_fsf_req->qtcb->header.fsf_status) {
-
-	case FSF_PORT_HANDLE_NOT_VALID:
-		if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
-			/*
-			 * In this case a command that was sent prior to a port
-			 * reopen was aborted (handles are different). This is
-			 * fine.
-			 */
-		} else {
-			ZFCP_LOG_INFO("Temporary port identifier 0x%x for "
-				      "port 0x%016Lx on adapter %s invalid. "
-				      "This may happen occasionally.\n",
-				      unit->port->handle,
-				      unit->port->wwpn,
-				      zfcp_get_busid_by_unit(unit));
-			ZFCP_LOG_INFO("status qualifier:\n");
-			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
-				      (char *) &new_fsf_req->qtcb->header.
-				      fsf_status_qual,
-				      sizeof (union fsf_status_qual));
-			/* Let's hope this sorts out the mess */
-			zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104,
-						new_fsf_req);
-			new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		}
-		break;
-
-	case FSF_LUN_HANDLE_NOT_VALID:
-		if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
-			/*
-			 * In this case a command that was sent prior to a unit
-			 * reopen was aborted (handles are different).
-			 * This is fine.
-			 */
-		} else {
-			ZFCP_LOG_INFO
-			    ("Warning: Temporary LUN identifier 0x%x of LUN "
-			     "0x%016Lx on port 0x%016Lx on adapter %s is "
-			     "invalid. This may happen in rare cases. "
-			     "Trying to re-establish link.\n",
-			     unit->handle,
-			     unit->fcp_lun,
-			     unit->port->wwpn,
-			     zfcp_get_busid_by_unit(unit));
-			ZFCP_LOG_DEBUG("Status qualifier data:\n");
-			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-				      (char *) &new_fsf_req->qtcb->header.
-				      fsf_status_qual,
-				      sizeof (union fsf_status_qual));
-			/* Let's hope this sorts out the mess */
-			zfcp_erp_port_reopen(unit->port, 0, 105, new_fsf_req);
-			new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		}
-		break;
-
-	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
-		retval = 0;
-		new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
-		break;
-
-	case FSF_PORT_BOXED:
-		ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to "
-			      "be reopened\n", unit->port->wwpn,
-			      zfcp_get_busid_by_unit(unit));
-		zfcp_erp_port_boxed(unit->port, 47, new_fsf_req);
-		new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
-			| ZFCP_STATUS_FSFREQ_RETRY;
-		break;
-
-	case FSF_LUN_BOXED:
-		ZFCP_LOG_INFO(
-			"unit 0x%016Lx on port 0x%016Lx on adapter %s needs "
-			"to be reopened\n",
-			unit->fcp_lun, unit->port->wwpn,
-			zfcp_get_busid_by_unit(unit));
-		zfcp_erp_unit_boxed(unit, 48, new_fsf_req);
-		new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
-			| ZFCP_STATUS_FSFREQ_RETRY;
-		break;
-
-	case FSF_ADAPTER_STATUS_AVAILABLE:
-		switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) {
-		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			zfcp_test_link(unit->port);
-			new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
-		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			/* SCSI stack will escalate */
-			new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
-		default:
-			ZFCP_LOG_NORMAL
-			    ("bug: Wrong status qualifier 0x%x arrived.\n",
-			     new_fsf_req->qtcb->header.fsf_status_qual.word[0]);
-			break;
-		}
-		break;
-
-	case FSF_GOOD:
-		retval = 0;
-		new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-				"(debug info 0x%x)\n",
-				new_fsf_req->qtcb->header.fsf_status);
-		break;
-	}
- skip_fsfstatus:
-	return retval;
-}
+static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_unit *unit = req->data;
+	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		if (fsq->word[0] == fsq->word[1]) {
+			zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104,
+						req);
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		}
+		break;
+	case FSF_LUN_HANDLE_NOT_VALID:
+		if (fsq->word[0] == fsq->word[1]) {
+			zfcp_erp_port_reopen(unit->port, 0, 105, req);
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		}
+		break;
+	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
+		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_port_boxed(unit->port, 47, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			       ZFCP_STATUS_FSFREQ_RETRY;
+		break;
+	case FSF_LUN_BOXED:
+		zfcp_erp_unit_boxed(unit, 48, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			       ZFCP_STATUS_FSFREQ_RETRY;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (fsq->word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			zfcp_test_link(unit->port);
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
+		break;
+	}
+}
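In both handle cases above the new handler only escalates when fsq->word[0] == fsq->word[1]: per the comments in the removed code, equal qualifier words mean the invalid handle is the one currently in use, while differing words mean the aborted command simply predates a reopen and can be ignored. A tiny sketch of that decision (the two-word layout is illustrative):

#include <stdio.h>

union status_qual {
	unsigned int word[2];	/* illustrative two-word qualifier */
};

/* Non-zero when recovery is needed: the qualifier words match, so the
 * failure concerns the handle we are using right now. */
static int handle_needs_recovery(const union status_qual *q)
{
	return q->word[0] == q->word[1];
}

int main(void)
{
	union status_qual stale = { { 1, 2 } }; /* pre-reopen command: ignore */
	union status_qual cur = { { 3, 3 } };	/* current handle bad: reopen */

	printf("%d %d\n", handle_needs_recovery(&stale),
	       handle_needs_recovery(&cur));
	return 0;
}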
-/**
- * zfcp_use_one_sbal - checks whether req buffer and resp buffer each fit into
- *	one SBALE
- * Two scatter-gather lists are passed, one for the request and one for the
- * response.
- */
-static inline int
-zfcp_use_one_sbal(struct scatterlist *req, int req_count,
-		  struct scatterlist *resp, int resp_count)
-{
-	return ((req_count == 1) &&
-		(resp_count == 1) &&
-		(((unsigned long) zfcp_sg_to_address(&req[0]) &
-		  PAGE_MASK) ==
-		 ((unsigned long) (zfcp_sg_to_address(&req[0]) +
-				   req[0].length - 1) & PAGE_MASK)) &&
-		(((unsigned long) zfcp_sg_to_address(&resp[0]) &
-		  PAGE_MASK) ==
-		 ((unsigned long) (zfcp_sg_to_address(&resp[0]) +
-				   resp[0].length - 1) & PAGE_MASK)));
-}
-
-/**
- * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
- * @ct: pointer to struct zfcp_send_ct which contains all needed data for
- *	the request
- * @pool: pointer to memory pool, if non-null this pool is used to allocate
- *	a struct zfcp_fsf_req
- * @erp_action: pointer to erp_action, if non-null the Generic Service request
- *	is sent within error recovery
- */
-int
-zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
-		 struct zfcp_erp_action *erp_action)
-{
-	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_port *port;
-	struct zfcp_adapter *adapter;
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long lock_flags;
-	int bytes;
-	int ret = 0;
-
-	port = ct->port;
-	adapter = port->adapter;
-
-	ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
-				  ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-				  pool, &lock_flags, &fsf_req);
-	if (ret < 0) {
-		ZFCP_LOG_INFO("error: Could not create CT request (FC-GS) for "
-			      "adapter: %s\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		goto failed_req;
-	}
-
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-	if (zfcp_use_one_sbal(ct->req, ct->req_count,
-			      ct->resp, ct->resp_count)){
-		/* both request buffer and response buffer
-		   fit into one sbale each */
-		sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
-		sbale[2].addr = zfcp_sg_to_address(&ct->req[0]);
-		sbale[2].length = ct->req[0].length;
-		sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
-		sbale[3].length = ct->resp[0].length;
-		sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-	} else if (adapter->adapter_features &
-		   FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
-		/* try to use chained SBALs */
-		bytes = zfcp_qdio_sbals_from_sg(fsf_req,
-						SBAL_FLAGS0_TYPE_WRITE_READ,
-						ct->req, ct->req_count,
-						ZFCP_MAX_SBALS_PER_CT_REQ);
-		if (bytes <= 0) {
-			ZFCP_LOG_INFO("error: creation of CT request failed "
-				      "on adapter %s\n",
-				      zfcp_get_busid_by_adapter(adapter));
-			if (bytes == 0)
-				ret = -ENOMEM;
-			else
-				ret = bytes;
-
-			goto failed_send;
-		}
-		fsf_req->qtcb->bottom.support.req_buf_length = bytes;
-		fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
-		bytes = zfcp_qdio_sbals_from_sg(fsf_req,
-						SBAL_FLAGS0_TYPE_WRITE_READ,
-						ct->resp, ct->resp_count,
-						ZFCP_MAX_SBALS_PER_CT_REQ);
-		if (bytes <= 0) {
-			ZFCP_LOG_INFO("error: creation of CT request failed "
-				      "on adapter %s\n",
-				      zfcp_get_busid_by_adapter(adapter));
-			if (bytes == 0)
-				ret = -ENOMEM;
-			else
-				ret = bytes;
-
-			goto failed_send;
-		}
-		fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
-	} else {
-		/* reject send generic request */
-		ZFCP_LOG_INFO(
-			"error: microcode does not support chained SBALs,"
-			"CT request too big (adapter %s)\n",
-			zfcp_get_busid_by_adapter(adapter));
-		ret = -EOPNOTSUPP;
-		goto failed_send;
-	}
-
-	/* settings in QTCB */
-	fsf_req->qtcb->header.port_handle = port->handle;
-	fsf_req->qtcb->bottom.support.service_class =
-		ZFCP_FC_SERVICE_CLASS_DEFAULT;
-	fsf_req->qtcb->bottom.support.timeout = ct->timeout;
-	fsf_req->data = (unsigned long) ct;
-
-	zfcp_san_dbf_event_ct_request(fsf_req);
-
-	if (erp_action) {
-		erp_action->fsf_req = fsf_req;
-		fsf_req->erp_action = erp_action;
-		zfcp_erp_start_timer(fsf_req);
-	} else
-		zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-
-	ret = zfcp_fsf_req_send(fsf_req);
-	if (ret) {
-		ZFCP_LOG_DEBUG("error: initiation of CT request failed "
-			       "(adapter %s, port 0x%016Lx)\n",
-			       zfcp_get_busid_by_adapter(adapter), port->wwpn);
-		goto failed_send;
-	}
-
-	ZFCP_LOG_DEBUG("CT request initiated (adapter %s, port 0x%016Lx)\n",
-		       zfcp_get_busid_by_adapter(adapter), port->wwpn);
-	goto out;
-
- failed_send:
-	zfcp_fsf_req_free(fsf_req);
-	if (erp_action != NULL) {
-		erp_action->fsf_req = NULL;
-	}
- failed_req:
- out:
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-				lock_flags);
-	return ret;
-}
+/**
+ * zfcp_fsf_abort_fcp_command - abort running SCSI command
+ * @old_req_id: unsigned long
+ * @adapter: pointer to struct zfcp_adapter
+ * @unit: pointer to struct zfcp_unit
+ * @req_flags: integer specifying the request flags
+ * Returns: pointer to struct zfcp_fsf_req
+ *
+ * FIXME(design): should be watched by a timeout !!!
+ */
+struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
+						struct zfcp_adapter *adapter,
+						struct zfcp_unit *unit,
+						int req_flags)
+{
+	volatile struct qdio_buffer_element *sbale;
+	struct zfcp_fsf_req *req = NULL;
+
+	spin_lock(&adapter->req_q.lock);
+	if (!atomic_read(&adapter->req_q.count))
+		goto out;
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
+				  req_flags, adapter->pool.fsf_req_abort);
+	if (unlikely(IS_ERR(req)))
+		goto out;
+
+	if (unlikely(!(atomic_read(&unit->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		goto out_error_free;
+
+	sbale = zfcp_qdio_sbale_req(req);
+	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+	req->data = unit;
+	req->handler = zfcp_fsf_abort_fcp_command_handler;
+	req->qtcb->header.lun_handle = unit->handle;
+	req->qtcb->header.port_handle = unit->port->handle;
+	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
+
+	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+	if (!zfcp_fsf_req_send(req))
+		goto out;
+
+out_error_free:
+	zfcp_fsf_req_free(req);
+	req = NULL;
+out:
+	spin_unlock(&adapter->req_q.lock);
+	return req;
+}
-/**
- * zfcp_fsf_send_ct_handler - handler for Generic Service requests
- * @fsf_req: pointer to struct zfcp_fsf_req
- *
- * Data specific for the Generic Service request is passed using
- * fsf_req->data. There we find the pointer to struct zfcp_send_ct.
- * Usually a specific handler for the CT request is called which is
- * found in this structure.
- */
-static int
-zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
-{
-	struct zfcp_port *port;
-	struct zfcp_adapter *adapter;
-	struct zfcp_send_ct *send_ct;
-	struct fsf_qtcb_header *header;
-	struct fsf_qtcb_bottom_support *bottom;
-	int retval = -EINVAL;
-	u16 subtable, rule, counter;
-
-	adapter = fsf_req->adapter;
-	send_ct = (struct zfcp_send_ct *) fsf_req->data;
-	port = send_ct->port;
-	header = &fsf_req->qtcb->header;
-	bottom = &fsf_req->qtcb->bottom.support;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
-		goto skip_fsfstatus;
-
-	/* evaluate FSF status in QTCB */
-	switch (header->fsf_status) {
-
-	case FSF_GOOD:
-		zfcp_san_dbf_event_ct_response(fsf_req);
-		retval = 0;
-		break;
-
-	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
-		ZFCP_LOG_INFO("error: adapter %s does not support fc "
-			      "class %d.\n",
-			      zfcp_get_busid_by_port(port),
-			      ZFCP_FC_SERVICE_CLASS_DEFAULT);
-		/* stop operation for this adapter */
-		zfcp_erp_adapter_shutdown(adapter, 0, 123, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_ADAPTER_STATUS_AVAILABLE:
-		switch (header->fsf_status_qual.word[0]){
-		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			/* reopening link to port */
-			zfcp_test_link(port);
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
-		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			/* ERP strategy will escalate */
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
-		default:
-			ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x "
-				      "arrived.\n",
-				      header->fsf_status_qual.word[0]);
-			break;
-		}
-		break;
-
-	case FSF_ACCESS_DENIED:
-		ZFCP_LOG_NORMAL("access denied, cannot send generic service "
-				"command (adapter %s, port d_id=0x%06x)\n",
-				zfcp_get_busid_by_port(port), port->d_id);
-		for (counter = 0; counter < 2; counter++) {
-			subtable = header->fsf_status_qual.halfword[counter * 2];
-			rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-			switch (subtable) {
-			case FSF_SQ_CFDC_SUBTABLE_OS:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-			case FSF_SQ_CFDC_SUBTABLE_LUN:
-				ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-					zfcp_act_subtable_type[subtable], rule);
-				break;
-			}
-		}
-		zfcp_erp_port_access_denied(port, 55, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_GENERIC_COMMAND_REJECTED:
-		ZFCP_LOG_INFO("generic service command rejected "
-			      "(adapter %s, port d_id=0x%06x)\n",
-			      zfcp_get_busid_by_port(port), port->d_id);
-		ZFCP_LOG_INFO("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_PORT_HANDLE_NOT_VALID:
-		ZFCP_LOG_DEBUG("Temporary port identifier 0x%x for port "
-			       "0x%016Lx on adapter %s invalid. This may "
-			       "happen occasionally.\n", port->handle,
-			       port->wwpn, zfcp_get_busid_by_port(port));
-		ZFCP_LOG_INFO("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_adapter_reopen(adapter, 0, 106, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_PORT_BOXED:
-		ZFCP_LOG_INFO("port needs to be reopened "
-			      "(adapter %s, port d_id=0x%06x)\n",
-			      zfcp_get_busid_by_port(port), port->d_id);
-		zfcp_erp_port_boxed(port, 49, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
-			| ZFCP_STATUS_FSFREQ_RETRY;
-		break;
-
-	/* following states should never occur, all cases avoided
-	   in zfcp_fsf_send_ct - but who knows ... */
-	case FSF_PAYLOAD_SIZE_MISMATCH:
-		ZFCP_LOG_INFO("payload size mismatch (adapter: %s, "
-			      "req_buf_length=%d, resp_buf_length=%d)\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      bottom->req_buf_length, bottom->resp_buf_length);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-	case FSF_REQUEST_SIZE_TOO_LARGE:
-		ZFCP_LOG_INFO("request size too large (adapter: %s, "
-			      "req_buf_length=%d)\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      bottom->req_buf_length);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-	case FSF_RESPONSE_SIZE_TOO_LARGE:
-		ZFCP_LOG_INFO("response size too large (adapter: %s, "
-			      "resp_buf_length=%d)\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      bottom->resp_buf_length);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-	case FSF_SBAL_MISMATCH:
-		ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
-			      "resp_buf_length=%d)\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      bottom->req_buf_length, bottom->resp_buf_length);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-				"(debug info 0x%x)\n", header->fsf_status);
-		break;
-	}
-
-skip_fsfstatus:
-	send_ct->status = retval;
-
-	if (send_ct->handler != NULL)
-		send_ct->handler(send_ct->handler_data);
-
-	return retval;
-}
+static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_send_ct *send_ct = req->data;
+	struct zfcp_port *port = send_ct->port;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	send_ct->status = -EINVAL;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		goto skip_fsfstatus;
+
+	switch (header->fsf_status) {
+	case FSF_GOOD:
+		zfcp_san_dbf_event_ct_response(req);
+		send_ct->status = 0;
+		break;
+	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+		zfcp_fsf_class_not_supp(req);
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]){
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			zfcp_test_link(port);
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_ACCESS_DENIED:
+		zfcp_fsf_access_denied_port(req, port);
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_port_boxed(port, 49, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			       ZFCP_STATUS_FSFREQ_RETRY;
+		break;
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(adapter, 0, 106, req);
+	case FSF_GENERIC_COMMAND_REJECTED:
+	case FSF_PAYLOAD_SIZE_MISMATCH:
+	case FSF_REQUEST_SIZE_TOO_LARGE:
+	case FSF_RESPONSE_SIZE_TOO_LARGE:
+	case FSF_SBAL_MISMATCH:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+
+skip_fsfstatus:
+	if (send_ct->handler)
+		send_ct->handler(send_ct->handler_data);
+}
+
+static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
+				struct scatterlist *sg_req,
+				struct scatterlist *sg_resp, int max_sbals)
+{
+	int bytes;
+
+	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+					sg_req, max_sbals);
+	if (bytes <= 0)
+		return -ENOMEM;
+	req->qtcb->bottom.support.req_buf_length = bytes;
+	req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+
+	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+					sg_resp, max_sbals);
+	if (bytes <= 0)
+		return -ENOMEM;
+	req->qtcb->bottom.support.resp_buf_length = bytes;
+
+	return 0;
+}
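zfcp_fsf_setup_sbals maps the request and the response scatterlists into the same SBAL chain and records how many bytes each occupies. How many storage-block entries a scatterlist needs follows directly from the per-entry payload limit; a sketch of that sizing calculation (limit and types are illustrative, not the QDIO definitions):

#include <stdio.h>

#define SBALE_PAYLOAD 4096	/* illustrative per-entry limit */

struct sg_entry {
	unsigned length;
};

/* Entries needed when every scatterlist element gets its own SBALEs. */
static unsigned sbales_needed(const struct sg_entry *sg, int count)
{
	unsigned total = 0;
	int i;

	for (i = 0; i < count; i++)
		total += (sg[i].length + SBALE_PAYLOAD - 1) / SBALE_PAYLOAD;
	return total;
}

int main(void)
{
	struct sg_entry sg[] = { { 512 }, { 8192 }, { 4096 } };

	printf("%u\n", sbales_needed(sg, 3));	/* 1 + 2 + 1 = 4 */
	return 0;
}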

 /**
- * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
- * @els: pointer to struct zfcp_send_els which contains all needed data for
- *	the command.
+ * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @ct: pointer to struct zfcp_send_ct with data for request
+ * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
+ * @erp_action: if non-null the Generic Service request sent within ERP
  */
-int
-zfcp_fsf_send_els(struct zfcp_send_els *els)
+int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
+		     struct zfcp_erp_action *erp_action)
 {
-	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
-	u32 d_id;
-	struct zfcp_adapter *adapter;
-	unsigned long lock_flags;
-	int bytes;
-	int ret = 0;
-
-	d_id = els->d_id;
-	adapter = els->adapter;
+	struct zfcp_port *port = ct->port;
+	struct zfcp_adapter *adapter = port->adapter;
+	struct zfcp_fsf_req *req;
+	int ret = -EIO;

-	ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  NULL, &lock_flags, &fsf_req);
-	if (ret < 0) {
-		ZFCP_LOG_INFO("error: creation of ELS request failed "
-			      "(adapter %s, port d_id: 0x%06x)\n",
-			      zfcp_get_busid_by_adapter(adapter), d_id);
-		goto failed_req;
-	}
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;

-	if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-				       &els->port->status))) {
-		ret = -EBUSY;
-		goto port_blocked;
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
+				  ZFCP_REQ_AUTO_CLEANUP, pool);
+	if (unlikely(IS_ERR(req))) {
+		ret = PTR_ERR(req);
+		goto out;
 	}

-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-	if (zfcp_use_one_sbal(els->req, els->req_count,
-			      els->resp, els->resp_count)){
-		/* both request buffer and response buffer
-		   fit into one sbale each */
-		sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
-		sbale[2].addr = zfcp_sg_to_address(&els->req[0]);
-		sbale[2].length = els->req[0].length;
-		sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
-		sbale[3].length = els->resp[0].length;
-		sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-	} else if (adapter->adapter_features &
-		   FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
-		/* try to use chained SBALs */
-		bytes = zfcp_qdio_sbals_from_sg(fsf_req,
-						SBAL_FLAGS0_TYPE_WRITE_READ,
-						els->req, els->req_count,
-						ZFCP_MAX_SBALS_PER_ELS_REQ);
-		if (bytes <= 0) {
-			ZFCP_LOG_INFO("error: creation of ELS request failed "
-				      "(adapter %s, port d_id: 0x%06x)\n",
-				      zfcp_get_busid_by_adapter(adapter), d_id);
-			if (bytes == 0) {
-				ret = -ENOMEM;
-			} else {
-				ret = bytes;
-			}
-			goto failed_send;
-		}
-		fsf_req->qtcb->bottom.support.req_buf_length = bytes;
-		fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
-		bytes = zfcp_qdio_sbals_from_sg(fsf_req,
-						SBAL_FLAGS0_TYPE_WRITE_READ,
-						els->resp, els->resp_count,
-						ZFCP_MAX_SBALS_PER_ELS_REQ);
-		if (bytes <= 0) {
-			ZFCP_LOG_INFO("error: creation of ELS request failed "
-				      "(adapter %s, port d_id: 0x%06x)\n",
-				      zfcp_get_busid_by_adapter(adapter), d_id);
-			if (bytes == 0) {
-				ret = -ENOMEM;
-			} else {
-				ret = bytes;
-			}
-			goto failed_send;
-		}
-		fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
-	} else {
-		/* reject request */
-		ZFCP_LOG_INFO("error: microcode does not support chained SBALs"
-			      ", ELS request too big (adapter %s, "
-			      "port d_id: 0x%06x)\n",
-			      zfcp_get_busid_by_adapter(adapter), d_id);
-		ret = -EOPNOTSUPP;
-		goto failed_send;
-	}
-
-	/* settings in QTCB */
-	fsf_req->qtcb->bottom.support.d_id = d_id;
-	fsf_req->qtcb->bottom.support.service_class =
-		ZFCP_FC_SERVICE_CLASS_DEFAULT;
-	fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
-	fsf_req->data = (unsigned long) els;
-
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-
-	zfcp_san_dbf_event_els_request(fsf_req);
-
-	zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-	ret = zfcp_fsf_req_send(fsf_req);
-	if (ret) {
-		ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
-			       "(adapter %s, port d_id: 0x%06x)\n",
-			       zfcp_get_busid_by_adapter(adapter), d_id);
+	ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp,
+				   FSF_MAX_SBALS_PER_REQ);
+	if (ret)
 		goto failed_send;
-	}

-	ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: "
-		       "0x%06x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
-	goto out;
+	req->handler = zfcp_fsf_send_ct_handler;
+	req->qtcb->header.port_handle = port->handle;
+	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.support.timeout = ct->timeout;
+	req->data = ct;

- port_blocked:
- failed_send:
-	zfcp_fsf_req_free(fsf_req);
+	zfcp_san_dbf_event_ct_request(req);

- failed_req:
- out:
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-				lock_flags);
+	if (erp_action) {
+		erp_action->fsf_req = req;
+		req->erp_action = erp_action;
+		zfcp_fsf_start_erp_timer(req);
+	} else
+		zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+
+	ret = zfcp_fsf_req_send(req);
+	if (ret)
+		goto failed_send;
+
+	goto out;

-	return ret;
+failed_send:
+	zfcp_fsf_req_free(req);
+	if (erp_action)
+		erp_action->fsf_req = NULL;
+out:
+	spin_unlock(&adapter->req_q.lock);
+	return ret;
 }
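
zfcp_fsf_send_ct() is the entry point for Generic Service (nameserver) traffic; the caller fills a struct zfcp_send_ct and learns the outcome through its handler, which runs from the request completion path. A hedged caller sketch, using only the fields visible above (port, req, resp, timeout, handler, handler_data, status); the callback name and the timeout value are assumptions:

        static void my_ct_done(unsigned long data)      /* hypothetical */
        {
                struct zfcp_send_ct *ct = (struct zfcp_send_ct *) data;
                /* ct->status now holds 0 or a negative error */
        }

        ct->port = nameserver_port;     /* hypothetical port reference */
        ct->req = &sg_req;
        ct->resp = &sg_resp;
        ct->timeout = 10;               /* assumed FSF timeout value */
        ct->handler = my_ct_done;
        ct->handler_data = (unsigned long) ct;

        ret = zfcp_fsf_send_ct(ct, NULL, NULL); /* no mempool, not in ERP */
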

-/**
- * zfcp_fsf_send_els_handler - handler for ELS commands
- * @fsf_req: pointer to struct zfcp_fsf_req
- *
- * Data specific for the ELS command is passed using
- * fsf_req->data. There we find the pointer to struct zfcp_send_els.
- * Usually a specific handler for the ELS command is called which is
- * found in this structure.
- */
-static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 {
-	struct zfcp_adapter *adapter;
-	struct zfcp_port *port;
-	u32 d_id;
-	struct fsf_qtcb_header *header;
-	struct fsf_qtcb_bottom_support *bottom;
-	struct zfcp_send_els *send_els;
-	int retval = -EINVAL;
-	u16 subtable, rule, counter;
-
-	send_els = (struct zfcp_send_els *) fsf_req->data;
-	adapter = send_els->adapter;
-	port = send_els->port;
-	d_id = send_els->d_id;
-	header = &fsf_req->qtcb->header;
-	bottom = &fsf_req->qtcb->bottom.support;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+	struct zfcp_send_els *send_els = req->data;
+	struct zfcp_port *port = send_els->port;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	send_els->status = -EINVAL;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto skip_fsfstatus;

 	switch (header->fsf_status) {
-
 	case FSF_GOOD:
-		zfcp_san_dbf_event_els_response(fsf_req);
-		retval = 0;
+		zfcp_san_dbf_event_els_response(req);
+		send_els->status = 0;
 		break;
-
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
-		ZFCP_LOG_INFO("error: adapter %s does not support fc "
-			      "class %d.\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      ZFCP_FC_SERVICE_CLASS_DEFAULT);
-		/* stop operation for this adapter */
-		zfcp_erp_adapter_shutdown(adapter, 0, 124, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_fsf_class_not_supp(req);
 		break;
-
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]){
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			if (port && (send_els->ls_code != ZFCP_LS_ADISC))
 				zfcp_test_link(port);
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
+			/*fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			retval =
-			  zfcp_handle_els_rjt(header->fsf_status_qual.word[1],
-					      (struct zfcp_ls_rjt_par *)
-					      &header->fsf_status_qual.word[2]);
-			break;
 		case FSF_SQ_RETRY_IF_POSSIBLE:
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
-		default:
-			ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x\n",
-				      header->fsf_status_qual.word[0]);
-			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
-				      (char*)header->fsf_status_qual.word, 16);
 		}
 		break;
-
 	case FSF_ELS_COMMAND_REJECTED:
-		ZFCP_LOG_INFO("ELS has been rejected because command filter "
-			      "prohibited sending "
-			      "(adapter: %s, port d_id: 0x%06x)\n",
-			      zfcp_get_busid_by_adapter(adapter), d_id);
-
-		break;
-
 	case FSF_PAYLOAD_SIZE_MISMATCH:
-		ZFCP_LOG_INFO(
-			"ELS request size and ELS response size must be either "
-			"both 0, or both greater than 0 "
-			"(adapter: %s, req_buf_length=%d resp_buf_length=%d)\n",
-			zfcp_get_busid_by_adapter(adapter),
-			bottom->req_buf_length,
-			bottom->resp_buf_length);
-		break;
-
 	case FSF_REQUEST_SIZE_TOO_LARGE:
-		ZFCP_LOG_INFO(
-			"Length of the ELS request buffer, "
-			"specified in QTCB bottom, "
-			"exceeds the size of the buffers "
-			"that have been allocated for ELS request data "
-			"(adapter: %s, req_buf_length=%d)\n",
-			zfcp_get_busid_by_adapter(adapter),
-			bottom->req_buf_length);
-		break;
-
 	case FSF_RESPONSE_SIZE_TOO_LARGE:
-		ZFCP_LOG_INFO(
-			"Length of the ELS response buffer, "
-			"specified in QTCB bottom, "
-			"exceeds the size of the buffers "
-			"that have been allocated for ELS response data "
-			"(adapter: %s, resp_buf_length=%d)\n",
-			zfcp_get_busid_by_adapter(adapter),
-			bottom->resp_buf_length);
-		break;
-
-	case FSF_SBAL_MISMATCH:
-		/* should never occure, avoided in zfcp_fsf_send_els */
-		ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
-			      "resp_buf_length=%d)\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      bottom->req_buf_length, bottom->resp_buf_length);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_ACCESS_DENIED:
-		ZFCP_LOG_NORMAL("access denied, cannot send ELS command "
-				"(adapter %s, port d_id=0x%06x)\n",
-				zfcp_get_busid_by_adapter(adapter), d_id);
-		for (counter = 0; counter < 2; counter++) {
-			subtable = header->fsf_status_qual.halfword[counter * 2];
-			rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-			switch (subtable) {
-			case FSF_SQ_CFDC_SUBTABLE_OS:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-			case FSF_SQ_CFDC_SUBTABLE_LUN:
-				ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-					zfcp_act_subtable_type[subtable], rule);
-				break;
-			}
-		}
-		if (port != NULL)
-			zfcp_erp_port_access_denied(port, 56, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_fsf_access_denied_port(req, port);
 		break;
-
+	case FSF_SBAL_MISMATCH:
+		/* should never occure, avoided in zfcp_fsf_send_els */
+		/* fall through */
 	default:
-		ZFCP_LOG_NORMAL(
-			"bug: An unknown FSF Status was presented "
-			"(adapter: %s, fsf_status=0x%08x)\n",
-			zfcp_get_busid_by_adapter(adapter),
-			header->fsf_status);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	}
-
skip_fsfstatus:
-	send_els->status = retval;
-
 	if (send_els->handler)
 		send_els->handler(send_els->handler_data);
+}

-	return retval;
+/**
+ * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @els: pointer to struct zfcp_send_els with data for the command
+ */
+int zfcp_fsf_send_els(struct zfcp_send_els *els)
+{
+	struct zfcp_fsf_req *req;
+	struct zfcp_adapter *adapter = els->adapter;
+	struct fsf_qtcb_bottom_support *bottom;
+	int ret = -EIO;
+
+	if (unlikely(!(atomic_read(&els->port->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		return -EBUSY;
+
+	spin_lock(&adapter->req_q.lock);
+	if (!atomic_read(&adapter->req_q.count))
+		goto out;
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
+				  ZFCP_REQ_AUTO_CLEANUP, NULL);
+	if (unlikely(IS_ERR(req))) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
+
+	ret = zfcp_fsf_setup_sbals(req, els->req, els->resp,
+				   FSF_MAX_SBALS_PER_ELS_REQ);
+	if (ret)
+		goto failed_send;
+
+	bottom = &req->qtcb->bottom.support;
+	req->handler = zfcp_fsf_send_els_handler;
+	bottom->d_id = els->d_id;
+	bottom->service_class = FSF_CLASS_3;
+	bottom->timeout = 2 * R_A_TOV;
+	req->data = els;
+
+	zfcp_san_dbf_event_els_request(req);
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	ret = zfcp_fsf_req_send(req);
+	if (ret)
+		goto failed_send;
+
+	goto out;
+
+failed_send:
+	zfcp_fsf_req_free(req);
+out:
+	spin_unlock(&adapter->req_q.lock);
+	return ret;
 }
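
zfcp_fsf_send_els() now rejects a send attempt with -EBUSY up front while the target port is blocked, instead of discovering that after the request was already created, and the QTCB timeout is fixed at 2 * R_A_TOV as FC-FS suggests for ELS exchanges. A minimal caller sketch, using only fields visible above (callback and context names are hypothetical):

        els->adapter = adapter;
        els->port = port;
        els->d_id = port->d_id;
        els->req = &sg_req;
        els->resp = &sg_resp;
        els->handler = my_els_done;             /* hypothetical callback */
        els->handler_data = (unsigned long) els;

        ret = zfcp_fsf_send_els(els);
        if (ret == -EBUSY)
                return ret;     /* port blocked; let recovery run first */
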

-int
-zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 {
 	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
+	struct zfcp_fsf_req *req;
 	struct zfcp_adapter *adapter = erp_action->adapter;
-	unsigned long lock_flags;
-	int retval;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(adapter,
-				     FSF_QTCB_EXCHANGE_CONFIG_DATA,
-				     ZFCP_REQ_AUTO_CLEANUP,
-				     adapter->pool.fsf_req_erp,
-				     &lock_flags, &fsf_req);
-	if (retval) {
-		ZFCP_LOG_INFO("error: Could not create exchange configuration "
-			      "data request for adapter %s.\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-					lock_flags);
-		return retval;
+	int retval = -EIO;
+
+	spin_lock(&adapter->req_q.lock);
+	if (!atomic_read(&adapter->req_q.count))
+		goto out;
+	req = zfcp_fsf_req_create(adapter,
+				  FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  ZFCP_REQ_AUTO_CLEANUP,
+				  adapter->pool.fsf_req_erp);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
+		goto out;
 	}

-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

-	fsf_req->qtcb->bottom.config.feature_selection =
+	req->qtcb->bottom.config.feature_selection =
 			FSF_FEATURE_CFDC |
 			FSF_FEATURE_LUN_SHARING |
 			FSF_FEATURE_NOTIFICATION_LOST |
 			FSF_FEATURE_UPDATE_ALERT;
-	fsf_req->erp_action = erp_action;
-	erp_action->fsf_req = fsf_req;
+	req->erp_action = erp_action;
+	req->handler = zfcp_fsf_exchange_config_data_handler;
+	erp_action->fsf_req = req;

-	zfcp_erp_start_timer(fsf_req);
-	retval = zfcp_fsf_req_send(fsf_req);
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-				lock_flags);
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
 	if (retval) {
-		ZFCP_LOG_INFO("error: Could not send exchange configuration "
-			      "data command on the adapter %s\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		zfcp_fsf_req_free(fsf_req);
+		zfcp_fsf_req_free(req);
 		erp_action->fsf_req = NULL;
 	}
-	else
-		ZFCP_LOG_DEBUG("exchange configuration data request initiated "
-			       "(adapter %s)\n",
-			       zfcp_get_busid_by_adapter(adapter));
-
+out:
+	spin_unlock(&adapter->req_q.lock);
 	return retval;
 }
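
The request constructor no longer reports failure through an output parameter; it returns either a valid request or an ERR_PTR()-encoded errno, so every caller in this file now follows one pattern (IS_ERR() and PTR_ERR() come from <linux/err.h>):

        req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
                                  ZFCP_REQ_AUTO_CLEANUP,
                                  adapter->pool.fsf_req_erp);
        if (IS_ERR(req)) {
                retval = PTR_ERR(req);  /* e.g. -ENOMEM or -EIO */
                goto out;
        }
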

-int
-zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
-				   struct fsf_qtcb_bottom_config *data)
+int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
+				       struct fsf_qtcb_bottom_config *data)
 {
 	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long lock_flags;
-	int retval;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
-				     ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags,
-				     &fsf_req);
-	if (retval) {
-		ZFCP_LOG_INFO("error: Could not create exchange configuration "
-			      "data request for adapter %s.\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-					lock_flags);
-		return retval;
+	struct zfcp_fsf_req *req = NULL;
+	int retval = -EIO;
+
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;
+
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  0, NULL);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
+		goto out;
 	}

-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	req->handler = zfcp_fsf_exchange_config_data_handler;

-	fsf_req->qtcb->bottom.config.feature_selection =
+	req->qtcb->bottom.config.feature_selection =
 			FSF_FEATURE_CFDC |
 			FSF_FEATURE_LUN_SHARING |
 			FSF_FEATURE_NOTIFICATION_LOST |
 			FSF_FEATURE_UPDATE_ALERT;

 	if (data)
-		fsf_req->data = (unsigned long) data;
+		req->data = data;

-	zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-	retval = zfcp_fsf_req_send(fsf_req);
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-				lock_flags);
-	if (retval)
-		ZFCP_LOG_INFO("error: Could not send exchange configuration "
-			      "data command on the adapter %s\n",
-			      zfcp_get_busid_by_adapter(adapter));
-	else
-		wait_event(fsf_req->completion_wq,
-			   fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+out:
+	spin_unlock(&adapter->req_q.lock);
+	if (!retval)
+		wait_event(req->completion_wq,
+			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);

-	zfcp_fsf_req_free(fsf_req);
+	zfcp_fsf_req_free(req);

 	return retval;
 }
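
The _sync variant issues the same FSF command outside error recovery and sleeps on the request's completion_wq until ZFCP_STATUS_FSFREQ_COMPLETED is set, so it may only be used from process context. A caller sketch with a caller-allocated result buffer (the consumer function is hypothetical):

        struct fsf_qtcb_bottom_config *bottom;
        int ret;

        bottom = kzalloc(sizeof(*bottom), GFP_KERNEL);
        if (!bottom)
                return -ENOMEM;
        ret = zfcp_fsf_exchange_config_data_sync(adapter, bottom);
        if (!ret)
                show_config(bottom);    /* hypothetical consumer */
        kfree(bottom);
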

 /**
- * zfcp_fsf_exchange_config_evaluate
- * @fsf_req: fsf_req which belongs to xchg config data request
- * @xchg_ok: specifies if xchg config data was incomplete or complete (0/1)
- *
- * returns: -EIO on error, 0 otherwise
- */
-static int
-zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
-{
-	struct fsf_qtcb_bottom_config *bottom;
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct Scsi_Host *shost = adapter->scsi_host;
-
-	bottom = &fsf_req->qtcb->bottom.config;
-	ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
-		       bottom->low_qtcb_version, bottom->high_qtcb_version);
-	adapter->fsf_lic_version = bottom->lic_version;
-	adapter->adapter_features = bottom->adapter_features;
-	adapter->connection_features = bottom->connection_features;
-	adapter->peer_wwpn = 0;
-	adapter->peer_wwnn = 0;
-	adapter->peer_d_id = 0;
-
-	if (xchg_ok) {
-
-		if (fsf_req->data)
-			memcpy((struct fsf_qtcb_bottom_config *) fsf_req->data,
-			       bottom, sizeof (struct fsf_qtcb_bottom_config));
-
-		fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
-		fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
-		fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
-		fc_host_speed(shost) = bottom->fc_link_speed;
-		fc_host_supported_classes(shost) =
-			FC_COS_CLASS2 | FC_COS_CLASS3;
-		adapter->hydra_version = bottom->adapter_type;
-		if (fc_host_permanent_port_name(shost) == -1)
-			fc_host_permanent_port_name(shost) =
-				fc_host_port_name(shost);
-		if (bottom->fc_topology == FSF_TOPO_P2P) {
-			adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
-			adapter->peer_wwpn = bottom->plogi_payload.wwpn;
-			adapter->peer_wwnn = bottom->plogi_payload.wwnn;
-			fc_host_port_type(shost) = FC_PORTTYPE_PTP;
-		} else if (bottom->fc_topology == FSF_TOPO_FABRIC)
-			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
-		else if (bottom->fc_topology == FSF_TOPO_AL)
-			fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
-		else
-			fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
-	} else {
-		fc_host_node_name(shost) = 0;
-		fc_host_port_name(shost) = 0;
-		fc_host_port_id(shost) = 0;
-		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
-		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
-		adapter->hydra_version = 0;
-	}
-
-	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
-		adapter->hardware_version = bottom->hardware_version;
-		memcpy(fc_host_serial_number(shost), bottom->serial_number,
-		       min(FC_SERIAL_NUMBER_SIZE, 17));
-		EBCASC(fc_host_serial_number(shost),
-		       min(FC_SERIAL_NUMBER_SIZE, 17));
-	}
-
-	if (fsf_req->erp_action)
-		ZFCP_LOG_NORMAL("The adapter %s reported the following "
-				"characteristics:\n"
-				"WWNN 0x%016Lx, WWPN 0x%016Lx, "
-				"S_ID 0x%06x,\n"
-				"adapter version 0x%x, "
-				"LIC version 0x%x, "
-				"FC link speed %d Gb/s\n",
-				zfcp_get_busid_by_adapter(adapter),
-				(wwn_t) fc_host_node_name(shost),
-				(wwn_t) fc_host_port_name(shost),
-				fc_host_port_id(shost),
-				adapter->hydra_version,
-				adapter->fsf_lic_version,
-				fc_host_speed(shost));
-	if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
-		ZFCP_LOG_NORMAL("error: the adapter %s "
-				"only supports newer control block "
-				"versions in comparison to this device "
-				"driver (try updated device driver)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		zfcp_erp_adapter_shutdown(adapter, 0, 125, fsf_req);
-		return -EIO;
-	}
-	if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) {
-		ZFCP_LOG_NORMAL("error: the adapter %s "
-				"only supports older control block "
-				"versions than this device driver uses"
-				"(consider a microcode upgrade)\n",
-				zfcp_get_busid_by_adapter(adapter));
-		zfcp_erp_adapter_shutdown(adapter, 0, 126, fsf_req);
-		return -EIO;
-	}
-	return 0;
-}
-
-/**
- * function: zfcp_fsf_exchange_config_data_handler
- *
- * purpose: is called for finished Exchange Configuration Data command
- *
- * returns:
- */
-static int
-zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
-{
-	struct fsf_qtcb_bottom_config *bottom;
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct fsf_qtcb *qtcb = fsf_req->qtcb;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
-		return -EIO;
-
-	switch (qtcb->header.fsf_status) {
-
-	case FSF_GOOD:
-		if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
-			return -EIO;
-
-		switch (fc_host_port_type(adapter->scsi_host)) {
-		case FC_PORTTYPE_PTP:
-			ZFCP_LOG_NORMAL("Point-to-Point fibrechannel "
-					"configuration detected at adapter %s\n"
-					"Peer WWNN 0x%016llx, "
-					"peer WWPN 0x%016llx, "
-					"peer d_id 0x%06x\n",
-					zfcp_get_busid_by_adapter(adapter),
-					adapter->peer_wwnn,
-					adapter->peer_wwpn,
-					adapter->peer_d_id);
-			break;
-		case FC_PORTTYPE_NLPORT:
-			ZFCP_LOG_NORMAL("error: Arbitrated loop fibrechannel "
-					"topology detected at adapter %s "
-					"unsupported, shutting down adapter\n",
-					zfcp_get_busid_by_adapter(adapter));
-			zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
-			return -EIO;
-		case FC_PORTTYPE_NPORT:
-			if (fsf_req->erp_action)
-				ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
-						"network detected at adapter "
-						"%s.\n",
-						zfcp_get_busid_by_adapter(adapter));
-			break;
-		default:
-			ZFCP_LOG_NORMAL("bug: The fibrechannel topology "
-					"reported by the exchange "
-					"configuration command for "
-					"the adapter %s is not "
-					"of a type known to the zfcp "
-					"driver, shutting down adapter\n",
-					zfcp_get_busid_by_adapter(adapter));
-			zfcp_erp_adapter_shutdown(adapter, 0, 128, fsf_req);
-			return -EIO;
-		}
-		bottom = &qtcb->bottom.config;
-		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
-			ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
-					"allowed by the adapter %s "
-					"is lower than the minimum "
-					"required by the driver (%ld bytes).\n",
-					bottom->max_qtcb_size,
-					zfcp_get_busid_by_adapter(adapter),
-					sizeof(struct fsf_qtcb));
-			zfcp_erp_adapter_shutdown(adapter, 0, 129, fsf_req);
-			return -EIO;
-		}
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-				&adapter->status);
-		break;
-	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
-		if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
-			return -EIO;
-
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-				&adapter->status);
-
-		zfcp_fsf_link_down_info_eval(fsf_req, 42,
-			&qtcb->header.fsf_status_qual.link_down_info);
-		break;
-	default:
-		zfcp_erp_adapter_shutdown(adapter, 0, 130, fsf_req);
-		return -EIO;
-	}
-	return 0;
-}
-
-/**
  * zfcp_fsf_exchange_port_data - request information about local port
  * @erp_action: ERP action for the adapter for which port data is requested
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 {
 	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
+	struct zfcp_fsf_req *req;
 	struct zfcp_adapter *adapter = erp_action->adapter;
-	unsigned long lock_flags;
-	int retval;
+	int retval = -EIO;

-	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
-		ZFCP_LOG_INFO("error: exchange port data "
-			      "command not supported by adapter %s\n",
-			      zfcp_get_busid_by_adapter(adapter));
+	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
 		return -EOPNOTSUPP;
-	}

-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
-				     ZFCP_REQ_AUTO_CLEANUP,
-				     adapter->pool.fsf_req_erp,
-				     &lock_flags, &fsf_req);
-	if (retval) {
-		ZFCP_LOG_INFO("error: Out of resources. Could not create an "
-			      "exchange port data request for "
-			      "the adapter %s.\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-					lock_flags);
-		return retval;
+	spin_lock(&adapter->req_q.lock);
+	if (!atomic_read(&adapter->req_q.count))
+		goto out;
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  ZFCP_REQ_AUTO_CLEANUP,
+				  adapter->pool.fsf_req_erp);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
+		goto out;
 	}

-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

-	erp_action->fsf_req = fsf_req;
-	fsf_req->erp_action = erp_action;
-	zfcp_erp_start_timer(fsf_req);
-
-	retval = zfcp_fsf_req_send(fsf_req);
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-
+	req->handler = zfcp_fsf_exchange_port_data_handler;
+	req->erp_action = erp_action;
+	erp_action->fsf_req = req;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
 	if (retval) {
-		ZFCP_LOG_INFO("error: Could not send an exchange port data "
-			      "command on the adapter %s\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		zfcp_fsf_req_free(fsf_req);
+		zfcp_fsf_req_free(req);
 		erp_action->fsf_req = NULL;
 	}
-	else
-		ZFCP_LOG_DEBUG("exchange port data request initiated "
-			       "(adapter %s)\n",
-			       zfcp_get_busid_by_adapter(adapter));
+out:
+	spin_unlock(&adapter->req_q.lock);
 	return retval;
 }
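
Two queue-gating styles appear in the rewritten senders: zfcp_fsf_req_sbal_get(), which may wait for a free SBAL (presumably dropping req_q.lock internally while it does, since sleeping under a held spinlock is not allowed), and a plain atomic_read() of req_q.count that fails fast with the preset -EIO when the request queue is full. Distilled, the fail-fast preamble is:

        spin_lock(&adapter->req_q.lock);
        if (!atomic_read(&adapter->req_q.count))        /* queue full */
                goto out;       /* retval stays at its -EIO preset */
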

-
 /**
  * zfcp_fsf_exchange_port_data_sync - request information about local port
- * and wait until information is ready
+ * @adapter: pointer to struct zfcp_adapter
+ * @data: pointer to struct fsf_qtcb_bottom_port
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
-				 struct fsf_qtcb_bottom_port *data)
+int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
+				     struct fsf_qtcb_bottom_port *data)
 {
 	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long lock_flags;
-	int retval;
-
-	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
-		ZFCP_LOG_INFO("error: exchange port data "
-			      "command not supported by adapter %s\n",
-			      zfcp_get_busid_by_adapter(adapter));
+	struct zfcp_fsf_req *req = NULL;
+	int retval = -EIO;
+
+	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
 		return -EOPNOTSUPP;
-	}

-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
-				     0, NULL, &lock_flags, &fsf_req);
-	if (retval) {
-		ZFCP_LOG_INFO("error: Out of resources. Could not create an "
-			      "exchange port data request for "
-			      "the adapter %s.\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		write_unlock_irqrestore(&adapter->request_queue.queue_lock,
-					lock_flags);
-		return retval;
+	spin_lock(&adapter->req_q.lock);
+	if (!atomic_read(&adapter->req_q.count))
+		goto out;
+
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
+				  NULL);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
+		goto out;
 	}

 	if (data)
-		fsf_req->data = (unsigned long) data;
+		req->data = data;

-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

-	zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-	retval = zfcp_fsf_req_send(fsf_req);
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-
-	if (retval)
-		ZFCP_LOG_INFO("error: Could not send an exchange port data "
-			      "command on the adapter %s\n",
-			      zfcp_get_busid_by_adapter(adapter));
-	else
-		wait_event(fsf_req->completion_wq,
-			   fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
-
-	zfcp_fsf_req_free(fsf_req);
-
-	return retval;
-}
-
-/**
- * zfcp_fsf_exchange_port_evaluate
- * @fsf_req: fsf_req which belongs to xchg port data request
- * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1)
- */
-static void
-zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
-{
-	struct zfcp_adapter *adapter;
-	struct fsf_qtcb_bottom_port *bottom;
-	struct Scsi_Host *shost;
-
-	adapter = fsf_req->adapter;
-	bottom = &fsf_req->qtcb->bottom.port;
-	shost = adapter->scsi_host;
-
-	if (fsf_req->data)
-		memcpy((struct fsf_qtcb_bottom_port*) fsf_req->data, bottom,
-		       sizeof(struct fsf_qtcb_bottom_port));
-
-	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
-		fc_host_permanent_port_name(shost) = bottom->wwpn;
-	else
-		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
-	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
-	fc_host_supported_speeds(shost) = bottom->supported_speed;
-}
-
-/**
- * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request
- * @fsf_req: pointer to struct zfcp_fsf_req
- */
-static void
-zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
-{
-	struct zfcp_adapter *adapter;
-	struct fsf_qtcb *qtcb;
-
-	adapter = fsf_req->adapter;
-	qtcb = fsf_req->qtcb;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
-		return;
-
-	switch (qtcb->header.fsf_status) {
-	case FSF_GOOD:
-		zfcp_fsf_exchange_port_evaluate(fsf_req, 1);
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
-		break;
-	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
-		zfcp_fsf_exchange_port_evaluate(fsf_req, 0);
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
-		zfcp_fsf_link_down_info_eval(fsf_req, 43,
-			&qtcb->header.fsf_status_qual.link_down_info);
-		break;
-	}
-}
-
-
-/*
- * function: zfcp_fsf_open_port
- *
- * purpose:
- *
- * returns: address of initiated FSF request
- *	NULL - request could not be initiated
- */
-int
-zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
-{
-	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long lock_flags;
-	int retval = 0;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(erp_action->adapter,
-				     FSF_QTCB_OPEN_PORT_WITH_DID,
-				     ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-				     erp_action->adapter->pool.fsf_req_erp,
-				     &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Could not create open port request "
-			      "for port 0x%016Lx on adapter %s.\n",
-			      erp_action->port->wwpn,
-			      zfcp_get_busid_by_adapter(erp_action->adapter));
-		goto out;
-	}
-
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
-	fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
-	atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
-	fsf_req->data = (unsigned long) erp_action->port;
-	fsf_req->erp_action = erp_action;
-	erp_action->fsf_req = fsf_req;
-
-	zfcp_erp_start_timer(fsf_req);
-	retval = zfcp_fsf_req_send(fsf_req);
-	if (retval) {
-		ZFCP_LOG_INFO("error: Could not send open port request for "
-			      "port 0x%016Lx on adapter %s.\n",
-			      erp_action->port->wwpn,
-			      zfcp_get_busid_by_adapter(erp_action->adapter));
-		zfcp_fsf_req_free(fsf_req);
-		erp_action->fsf_req = NULL;
-		goto out;
-	}
+	req->handler = zfcp_fsf_exchange_port_data_handler;
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+out:
+	spin_unlock(&adapter->req_q.lock);
+	if (!retval)
+		wait_event(req->completion_wq,
+			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+	zfcp_fsf_req_free(req);

-	ZFCP_LOG_DEBUG("open port request initiated "
-		       "(adapter %s, port 0x%016Lx)\n",
-		       zfcp_get_busid_by_adapter(erp_action->adapter),
-		       erp_action->port->wwpn);
- out:
-	write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-				lock_flags);
 	return retval;
 }
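
zfcp_fsf_exchange_port_data_sync() mirrors the config variant: it fails early with -EOPNOTSUPP when the adapter lacks FSF_FEATURE_HBAAPI_MANAGEMENT, and otherwise the completion handler copies the QTCB's bottom.port payload into the caller's buffer before the waiter returns. A caller sketch (the consumer is hypothetical):

        struct fsf_qtcb_bottom_port *bottom;

        bottom = kzalloc(sizeof(*bottom), GFP_KERNEL);
        if (!bottom)
                return -ENOMEM;
        if (!zfcp_fsf_exchange_port_data_sync(adapter, bottom))
                show_port_info(bottom); /* hypothetical consumer */
        kfree(bottom);
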

-/*
- * function:    zfcp_fsf_open_port_handler
- *
- * purpose:	is called for finished Open Port command
- *
- * returns:
- */
-static int
-zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 {
-	int retval = -EINVAL;
-	struct zfcp_port *port;
+	struct zfcp_port *port = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
 	struct fsf_plogi *plogi;
-	struct fsf_qtcb_header *header;
-	u16 subtable, rule, counter;

-	port = (struct zfcp_port *) fsf_req->data;
-	header = &fsf_req->qtcb->header;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		/* don't change port status in our bookkeeping */
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto skip_fsfstatus;
-	}

-	/* evaluate FSF status in QTCB */
 	switch (header->fsf_status) {
-
 	case FSF_PORT_ALREADY_OPEN:
-		ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s "
-				"is already open.\n",
-				port->wwpn, zfcp_get_busid_by_port(port));
-		/*
-		 * This is a bug, however operation should continue normally
-		 * if it is simply ignored
-		 */
 		break;
-
 	case FSF_ACCESS_DENIED:
-		ZFCP_LOG_NORMAL("Access denied, cannot open port 0x%016Lx "
-				"on adapter %s\n",
-				port->wwpn, zfcp_get_busid_by_port(port));
-		for (counter = 0; counter < 2; counter++) {
-			subtable = header->fsf_status_qual.halfword[counter * 2];
-			rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-			switch (subtable) {
-			case FSF_SQ_CFDC_SUBTABLE_OS:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-			case FSF_SQ_CFDC_SUBTABLE_LUN:
-				ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-					zfcp_act_subtable_type[subtable], rule);
-				break;
-			}
-		}
-		zfcp_erp_port_access_denied(port, 57, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_fsf_access_denied_port(req, port);
 		break;
-
 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
-		ZFCP_LOG_INFO("error: The FSF adapter is out of resources. "
-			      "The remote port 0x%016Lx on adapter %s "
-			      "could not be opened. Disabling it.\n",
-			      port->wwpn, zfcp_get_busid_by_port(port));
-		zfcp_erp_port_failed(port, 31, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The adapter is out of resources. The remote port "
+			 "0x%016Lx could not be opened, disabling it.\n",
+			 port->wwpn);
+		zfcp_erp_port_failed(port, 31, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			/* ERP strategy will escalate */
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			/* ERP strategy will escalate */
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		case FSF_SQ_NO_RETRY_POSSIBLE:
-			ZFCP_LOG_NORMAL("The remote port 0x%016Lx on "
-					"adapter %s could not be opened. "
-					"Disabling it.\n",
-					port->wwpn,
-					zfcp_get_busid_by_port(port));
-			zfcp_erp_port_failed(port, 32, fsf_req);
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
-		default:
-			ZFCP_LOG_NORMAL
-			    ("bug: Wrong status qualifier 0x%x arrived.\n",
-			     header->fsf_status_qual.word[0]);
+			dev_warn(&req->adapter->ccw_device->dev,
+				 "The remote port 0x%016Lx could not be "
+				 "opened. Disabling it.\n", port->wwpn);
+			zfcp_erp_port_failed(port, 32, req);
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		}
 		break;
-
 	case FSF_GOOD:
-		/* save port handle assigned by FSF */
 		port->handle = header->port_handle;
-		ZFCP_LOG_INFO("The remote port 0x%016Lx via adapter %s "
-			      "was opened, it's port handle is 0x%x\n",
-			      port->wwpn, zfcp_get_busid_by_port(port),
-			      port->handle);
-		/* mark port as open */
 		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
 				  ZFCP_STATUS_COMMON_ACCESS_BOXED,
 				  &port->status);
-		retval = 0;
 		/* check whether D_ID has changed during open */
 		/*
 		 * FIXME: This check is not airtight, as the FCP channel does
@@ -2526,320 +1496,168 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
 		 * another GID_PN straight after a port has been opened.
 		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
 		 */
-		plogi = (struct fsf_plogi *) fsf_req->qtcb->bottom.support.els;
-		if (!atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, &port->status))
-		{
-			if (fsf_req->qtcb->bottom.support.els1_length <
-			    sizeof (struct fsf_plogi)) {
-				ZFCP_LOG_INFO(
-					"warning: insufficient length of "
-					"PLOGI payload (%i)\n",
-					fsf_req->qtcb->bottom.support.els1_length);
-				/* skip sanity check and assume wwpn is ok */
-			} else {
-				if (plogi->serv_param.wwpn != port->wwpn) {
-					ZFCP_LOG_INFO("warning: d_id of port "
-						      "0x%016Lx changed during "
-						      "open\n", port->wwpn);
-					atomic_clear_mask(
-						ZFCP_STATUS_PORT_DID_DID,
-						&port->status);
-				} else {
-					port->wwnn = plogi->serv_param.wwnn;
-					zfcp_plogi_evaluate(port, plogi);
-				}
+		if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN)
+			break;
+
+		plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
+		if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
+			if (plogi->serv_param.wwpn != port->wwpn)
+				atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID,
+						  &port->status);
+			else {
+				port->wwnn = plogi->serv_param.wwnn;
+				zfcp_fc_plogi_evaluate(port, plogi);
 			}
 		}
 		break;
-
 	case FSF_UNKNOWN_OP_SUBTYPE:
-		/* should never occure, subtype not set in zfcp_fsf_open_port */
-		ZFCP_LOG_INFO("unknown operation subtype (adapter: %s, "
-			      "op_subtype=0x%x)\n",
-			      zfcp_get_busid_by_port(port),
-			      fsf_req->qtcb->bottom.support.operation_subtype);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-				"(debug info 0x%x)\n",
-				header->fsf_status);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	}

- skip_fsfstatus:
+skip_fsfstatus:
 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
-	return retval;
 }

-/*
- * function:    zfcp_fsf_close_port
- *
- * purpose:     submit FSF command "close port"
- *
- * returns:     address of initiated FSF request
- *              NULL - request could not be initiated
+/**
+ * zfcp_fsf_open_port - create and send open port request
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 {
 	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long lock_flags;
-	int retval = 0;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;

-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(erp_action->adapter,
-				     FSF_QTCB_CLOSE_PORT,
-				     ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-				     erp_action->adapter->pool.fsf_req_erp,
-				     &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Could not create a close port request "
-			      "for port 0x%016Lx on adapter %s.\n",
-			      erp_action->port->wwpn,
-			      zfcp_get_busid_by_adapter(erp_action->adapter));
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;
+
+	req = zfcp_fsf_req_create(adapter,
+				  FSF_QTCB_OPEN_PORT_WITH_DID,
+				  ZFCP_REQ_AUTO_CLEANUP,
+				  adapter->pool.fsf_req_erp);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
 		goto out;
 	}

-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

-	atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
-	fsf_req->data = (unsigned long) erp_action->port;
-	fsf_req->erp_action = erp_action;
-	fsf_req->qtcb->header.port_handle = erp_action->port->handle;
-	fsf_req->erp_action = erp_action;
-	erp_action->fsf_req = fsf_req;
+	req->handler = zfcp_fsf_open_port_handler;
+	req->qtcb->bottom.support.d_id = erp_action->port->d_id;
+	req->data = erp_action->port;
+	req->erp_action = erp_action;
+	erp_action->fsf_req = req;
+	atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);

-	zfcp_erp_start_timer(fsf_req);
-	retval = zfcp_fsf_req_send(fsf_req);
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
 	if (retval) {
-		ZFCP_LOG_INFO("error: Could not send a close port request for "
-			      "port 0x%016Lx on adapter %s.\n",
-			      erp_action->port->wwpn,
-			      zfcp_get_busid_by_adapter(erp_action->adapter));
-		zfcp_fsf_req_free(fsf_req);
+		zfcp_fsf_req_free(req);
 		erp_action->fsf_req = NULL;
-		goto out;
 	}
-
-	ZFCP_LOG_TRACE("close port request initiated "
-		       "(adapter %s, port 0x%016Lx)\n",
-		       zfcp_get_busid_by_adapter(erp_action->adapter),
-		       erp_action->port->wwpn);
- out:
-	write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-				lock_flags);
+out:
+	spin_unlock(&adapter->req_q.lock);
 	return retval;
 }
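
Every ERP-initiated request in this file wires the request and the ERP action to each other before sending, and undoes that wiring when the send fails, so the ERP engine never sees a stale request pointer. The recurring pattern, as used in zfcp_fsf_open_port() above:

        req->handler = zfcp_fsf_open_port_handler;
        req->erp_action = erp_action;
        erp_action->fsf_req = req;

        zfcp_fsf_start_erp_timer(req);
        retval = zfcp_fsf_req_send(req);
        if (retval) {
                zfcp_fsf_req_free(req);
                erp_action->fsf_req = NULL;     /* undo the wiring */
        }
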

-/*
- * function:    zfcp_fsf_close_port_handler
- *
- * purpose:     is called for finished Close Port FSF command
- *
- * returns:
- */
-static int
-zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
 {
-	int retval = -EINVAL;
-	struct zfcp_port *port;
+	struct zfcp_port *port = req->data;

-	port = (struct zfcp_port *) fsf_req->data;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		/* don't change port status in our bookkeeping */
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto skip_fsfstatus;
-	}
-
-	/* evaluate FSF status in QTCB */
-	switch (fsf_req->qtcb->header.fsf_status) {

+	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
-			      "0x%016Lx on adapter %s invalid. This may happen "
-			      "occasionally.\n", port->handle,
-			      port->wwpn, zfcp_get_busid_by_port(port));
-		ZFCP_LOG_DEBUG("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_adapter_reopen(port->adapter, 0, 107, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_erp_adapter_reopen(port->adapter, 0, 107, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_ADAPTER_STATUS_AVAILABLE:
-		/* Note: FSF has actually closed the port in this case.
-		 * The status code is just daft. Fingers crossed for a change
-		 */
-		retval = 0;
 		break;
-
 	case FSF_GOOD:
-		ZFCP_LOG_TRACE("remote port 0x016%Lx on adapter %s closed, "
-			       "port handle 0x%x\n", port->wwpn,
-			       zfcp_get_busid_by_port(port), port->handle);
-		zfcp_erp_modify_port_status(port, 33, fsf_req,
+		zfcp_erp_modify_port_status(port, 33, req,
 					    ZFCP_STATUS_COMMON_OPEN,
 					    ZFCP_CLEAR);
-		retval = 0;
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-				"(debug info 0x%x)\n",
-				fsf_req->qtcb->header.fsf_status);
 		break;
 	}

- skip_fsfstatus:
+skip_fsfstatus:
 	atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
-	return retval;
 }

-/*
- * function:    zfcp_fsf_close_physical_port
- *
- * purpose:     submit FSF command "close physical port"
- *
- * returns:     address of initiated FSF request
- *              NULL - request could not be initiated
+/**
+ * zfcp_fsf_close_port - create and send close port request
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
+int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 {
 	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long lock_flags;
-	int retval = 0;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(erp_action->adapter,
-				     FSF_QTCB_CLOSE_PHYSICAL_PORT,
-				     ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-				     erp_action->adapter->pool.fsf_req_erp,
-				     &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Could not create close physical port "
-			      "request (adapter %s, port 0x%016Lx)\n",
-			      zfcp_get_busid_by_adapter(erp_action->adapter),
-			      erp_action->port->wwpn);
-
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;
+
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
+				  ZFCP_REQ_AUTO_CLEANUP,
+				  adapter->pool.fsf_req_erp);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
 		goto out;
 	}

-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

-	/* mark port as being closed */
-	atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
-			&erp_action->port->status);
-	/* save a pointer to this port */
-	fsf_req->data = (unsigned long) erp_action->port;
-	fsf_req->qtcb->header.port_handle = erp_action->port->handle;
-	fsf_req->erp_action = erp_action;
-	erp_action->fsf_req = fsf_req;
-
-	zfcp_erp_start_timer(fsf_req);
-	retval = zfcp_fsf_req_send(fsf_req);
+	req->handler = zfcp_fsf_close_port_handler;
+	req->data = erp_action->port;
+	req->erp_action = erp_action;
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	erp_action->fsf_req = req;
+	atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
 	if (retval) {
-		ZFCP_LOG_INFO("error: Could not send close physical port "
-			      "request (adapter %s, port 0x%016Lx)\n",
-			      zfcp_get_busid_by_adapter(erp_action->adapter),
-			      erp_action->port->wwpn);
-		zfcp_fsf_req_free(fsf_req);
+		zfcp_fsf_req_free(req);
 		erp_action->fsf_req = NULL;
-		goto out;
 	}
-
-	ZFCP_LOG_TRACE("close physical port request initiated "
-		       "(adapter %s, port 0x%016Lx)\n",
-		       zfcp_get_busid_by_adapter(erp_action->adapter),
-		       erp_action->port->wwpn);
- out:
-	write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-				lock_flags);
+out:
+	spin_unlock(&adapter->req_q.lock);
 	return retval;
 }

-/*
- * function:    zfcp_fsf_close_physical_port_handler
- *
- * purpose:     is called for finished Close Physical Port FSF command
- *
- * returns:
- */
-static int
-zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 {
-	int retval = -EINVAL;
-	struct zfcp_port *port;
+	struct zfcp_port *port = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
 	struct zfcp_unit *unit;
-	struct fsf_qtcb_header *header;
-	u16 subtable, rule, counter;

-	port = (struct zfcp_port *) fsf_req->data;
-	header = &fsf_req->qtcb->header;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		/* don't change port status in our bookkeeping */
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto skip_fsfstatus;
-	}

-	/* evaluate FSF status in QTCB */
 	switch (header->fsf_status) {
-
 	case FSF_PORT_HANDLE_NOT_VALID:
-		ZFCP_LOG_INFO("Temporary port identifier 0x%x invalid"
-			      "(adapter %s, port 0x%016Lx). "
-			      "This may happen occasionally.\n",
-			      port->handle,
-			      zfcp_get_busid_by_port(port),
-			      port->wwpn);
-		ZFCP_LOG_DEBUG("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_adapter_reopen(port->adapter, 0, 108, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_erp_adapter_reopen(port->adapter, 0, 108, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_ACCESS_DENIED:
-		ZFCP_LOG_NORMAL("Access denied, cannot close "
-				"physical port 0x%016Lx on adapter %s\n",
-				port->wwpn, zfcp_get_busid_by_port(port));
-		for (counter = 0; counter < 2; counter++) {
-			subtable = header->fsf_status_qual.halfword[counter * 2];
-			rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-			switch (subtable) {
-			case FSF_SQ_CFDC_SUBTABLE_OS:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-			case FSF_SQ_CFDC_SUBTABLE_LUN:
-				ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-					zfcp_act_subtable_type[subtable], rule);
-				break;
-			}
-		}
-		zfcp_erp_port_access_denied(port, 58, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_fsf_access_denied_port(req, port);
 		break;
-
 	case FSF_PORT_BOXED:
-		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter "
-			       "%s needs to be reopened but it was attempted "
-			       "to close it physically.\n",
-			       port->wwpn,
-			       zfcp_get_busid_by_port(port));
-		zfcp_erp_port_boxed(port, 50, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-			ZFCP_STATUS_FSFREQ_RETRY;
-
+		zfcp_erp_port_boxed(port, 50, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			       ZFCP_STATUS_FSFREQ_RETRY;
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
@@ -2847,154 +1665,88 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
 			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
 					  &unit->status);
 		break;
-
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			/* This will now be escalated by ERP */
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
+			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			/* ERP strategy will escalate */
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
-		default:
-			ZFCP_LOG_NORMAL
-			    ("bug: Wrong status qualifier 0x%x arrived.\n",
-			     header->fsf_status_qual.word[0]);
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		}
 		break;
-
 	case FSF_GOOD:
-		ZFCP_LOG_DEBUG("Remote port 0x%016Lx via adapter %s "
-			       "physically closed, port handle 0x%x\n",
-			       port->wwpn,
-			       zfcp_get_busid_by_port(port), port->handle);
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
 		 */
 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		list_for_each_entry(unit, &port->unit_list_head, list)
-			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
-		retval = 0;
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-				"(debug info 0x%x)\n",
-				header->fsf_status);
+			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+					  &unit->status);
 		break;
 	}
-
- skip_fsfstatus:
+skip_fsfstatus:
 	atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
-	return retval;
 }
2894 1690
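Every completion handler in this patch follows the same shape: bail out early if the request already failed, map each FSF status to an ERP escalation plus status flags on the request, and touch driver bookkeeping only on FSF_GOOD. A minimal sketch of that contract, condensed from the hunks above rather than copied verbatim from the driver:

	static void example_status_handler(struct zfcp_fsf_req *req)
	{
		struct zfcp_port *port = req->data;	/* wired up at submission time */

		if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
			return;				/* bookkeeping stays untouched */

		switch (req->qtcb->header.fsf_status) {
		case FSF_PORT_HANDLE_NOT_VALID:
			/* stale handle: reopen the adapter, fail the request */
			zfcp_erp_adapter_reopen(port->adapter, 0, 108, req);
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		case FSF_PORT_BOXED:
			/* port needs reopening: fail the request but permit a retry */
			zfcp_erp_port_boxed(port, 50, req);
			req->status |= ZFCP_STATUS_FSFREQ_ERROR |
				       ZFCP_STATUS_FSFREQ_RETRY;
			break;
		case FSF_GOOD:
			/* success is the only path that changes object state */
			atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
			break;
		}
	}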
-/*
- * function:    zfcp_fsf_open_unit
- *
- * purpose:
- *
- * returns:
- *
- * assumptions:	This routine does not check whether the associated
- *		remote port has already been opened. This should be
- *		done by calling routines. Otherwise some status
- *		may be presented by FSF
- */
-int
-zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
+/**
+ * zfcp_fsf_close_physical_port - close physical port
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success
+ */
+int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 {
 	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long lock_flags;
-	int retval = 0;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(erp_action->adapter,
-				     FSF_QTCB_OPEN_LUN,
-				     ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-				     erp_action->adapter->pool.fsf_req_erp,
-				     &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Could not create open unit request for "
-			      "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
-			      erp_action->unit->fcp_lun,
-			      erp_action->unit->port->wwpn,
-			      zfcp_get_busid_by_adapter(erp_action->adapter));
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;
+
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+				  ZFCP_REQ_AUTO_CLEANUP,
+				  adapter->pool.fsf_req_erp);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-	fsf_req->qtcb->header.port_handle = erp_action->port->handle;
-	fsf_req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
-	if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
-		fsf_req->qtcb->bottom.support.option =
-			FSF_OPEN_LUN_SUPPRESS_BOXING;
-	atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
-	fsf_req->data = (unsigned long) erp_action->unit;
-	fsf_req->erp_action = erp_action;
-	erp_action->fsf_req = fsf_req;
-
-	zfcp_erp_start_timer(fsf_req);
-	retval = zfcp_fsf_req_send(erp_action->fsf_req);
+	req->data = erp_action->port;
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->erp_action = erp_action;
+	req->handler = zfcp_fsf_close_physical_port_handler;
+	erp_action->fsf_req = req;
+	atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
+			&erp_action->port->status);
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
 	if (retval) {
-		ZFCP_LOG_INFO("error: Could not send an open unit request "
-			      "on the adapter %s, port 0x%016Lx for "
-			      "unit 0x%016Lx\n",
-			      zfcp_get_busid_by_adapter(erp_action->adapter),
-			      erp_action->port->wwpn,
-			      erp_action->unit->fcp_lun);
-		zfcp_fsf_req_free(fsf_req);
+		zfcp_fsf_req_free(req);
 		erp_action->fsf_req = NULL;
-		goto out;
 	}
-
-	ZFCP_LOG_TRACE("Open LUN request initiated (adapter %s, "
-		       "port 0x%016Lx, unit 0x%016Lx)\n",
-		       zfcp_get_busid_by_adapter(erp_action->adapter),
-		       erp_action->port->wwpn, erp_action->unit->fcp_lun);
- out:
-	write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-				lock_flags);
+out:
+	spin_unlock(&adapter->req_q.lock);
 	return retval;
 }

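The new initiator above is one instance of a skeleton that recurs in every rewritten zfcp_fsf_* submission path in this patch. Distilled below, with the QTCB and SBALE setup omitted; the helper name send_erp_request is hypothetical, everything it calls appears in the hunks:

	static int send_erp_request(struct zfcp_adapter *adapter, u32 fsf_cmd,
				    struct zfcp_erp_action *erp_action)
	{
		struct zfcp_fsf_req *req;
		int retval = -EIO;

		spin_lock(&adapter->req_q.lock);	/* serialize request-queue access */
		if (zfcp_fsf_req_sbal_get(adapter))	/* reserve an SBAL or bail out */
			goto out;
		req = zfcp_fsf_req_create(adapter, fsf_cmd, ZFCP_REQ_AUTO_CLEANUP,
					  adapter->pool.fsf_req_erp);
		if (IS_ERR(req)) {			/* ERR_PTR replaces the old out-param */
			retval = PTR_ERR(req);
			goto out;
		}
		req->erp_action = erp_action;		/* wire up the completion context */
		erp_action->fsf_req = req;
		zfcp_fsf_start_erp_timer(req);		/* bound the wait for the answer */
		retval = zfcp_fsf_req_send(req);
		if (retval) {				/* undo everything on send failure */
			zfcp_fsf_req_free(req);
			erp_action->fsf_req = NULL;
		}
	out:
		spin_unlock(&adapter->req_q.lock);
		return retval;
	}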
-/*
- * function:    zfcp_fsf_open_unit_handler
- *
- * purpose:	is called for finished Open LUN command
- *
- * returns:
- */
-static int
-zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
 {
-	int retval = -EINVAL;
-	struct zfcp_adapter *adapter;
-	struct zfcp_unit *unit;
-	struct fsf_qtcb_header *header;
-	struct fsf_qtcb_bottom_support *bottom;
-	struct fsf_queue_designator *queue_designator;
-	u16 subtable, rule, counter;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_unit *unit = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
+	struct fsf_queue_designator *queue_designator =
+				&header->fsf_status_qual.fsf_queue_designator;
 	int exclusive, readwrite;
 
-	unit = (struct zfcp_unit *) fsf_req->data;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		/* don't change unit status in our bookkeeping */
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto skip_fsfstatus;
-	}
-
-	adapter = fsf_req->adapter;
-	header = &fsf_req->qtcb->header;
-	bottom = &fsf_req->qtcb->bottom.support;
-	queue_designator = &header->fsf_status_qual.fsf_queue_designator;
 
 	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
 			  ZFCP_STATUS_COMMON_ACCESS_BOXED |
@@ -3002,155 +1754,65 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
 			  ZFCP_STATUS_UNIT_READONLY,
 			  &unit->status);
 
-	/* evaluate FSF status in QTCB */
 	switch (header->fsf_status) {
 
 	case FSF_PORT_HANDLE_NOT_VALID:
-		ZFCP_LOG_INFO("Temporary port identifier 0x%x "
-			      "for port 0x%016Lx on adapter %s invalid "
-			      "This may happen occasionally\n",
-			      unit->port->handle,
-			      unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-		ZFCP_LOG_DEBUG("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req);
+		/* fall through */
 	case FSF_LUN_ALREADY_OPEN:
-		ZFCP_LOG_NORMAL("bug: Attempted to open unit 0x%016Lx on "
-				"remote port 0x%016Lx on adapter %s twice.\n",
-				unit->fcp_lun,
-				unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_ACCESS_DENIED:
-		ZFCP_LOG_NORMAL("Access denied, cannot open unit 0x%016Lx on "
-				"remote port 0x%016Lx on adapter %s\n",
-				unit->fcp_lun, unit->port->wwpn,
-				zfcp_get_busid_by_unit(unit));
-		for (counter = 0; counter < 2; counter++) {
-			subtable = header->fsf_status_qual.halfword[counter * 2];
-			rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-			switch (subtable) {
-			case FSF_SQ_CFDC_SUBTABLE_OS:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-			case FSF_SQ_CFDC_SUBTABLE_LUN:
-				ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-					      zfcp_act_subtable_type[subtable], rule);
-				break;
-			}
-		}
-		zfcp_erp_unit_access_denied(unit, 59, fsf_req);
+		zfcp_fsf_access_denied_unit(req, unit);
 		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
 		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_PORT_BOXED:
-		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
-			       "needs to be reopened\n",
-			       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-		zfcp_erp_port_boxed(unit->port, 51, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-				   ZFCP_STATUS_FSFREQ_RETRY;
+		zfcp_erp_port_boxed(unit->port, 51, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			       ZFCP_STATUS_FSFREQ_RETRY;
 		break;
-
 	case FSF_LUN_SHARING_VIOLATION:
-		if (header->fsf_status_qual.word[0] != 0) {
-			ZFCP_LOG_NORMAL("FCP-LUN 0x%Lx at the remote port "
-					"with WWPN 0x%Lx "
-					"connected to the adapter %s "
-					"is already in use in LPAR%d, CSS%d\n",
-					unit->fcp_lun,
-					unit->port->wwpn,
-					zfcp_get_busid_by_unit(unit),
-					queue_designator->hla,
-					queue_designator->cssid);
-		} else {
-			subtable = header->fsf_status_qual.halfword[4];
-			rule = header->fsf_status_qual.halfword[5];
-			switch (subtable) {
-			case FSF_SQ_CFDC_SUBTABLE_OS:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-			case FSF_SQ_CFDC_SUBTABLE_LUN:
-				ZFCP_LOG_NORMAL("Access to FCP-LUN 0x%Lx at the "
-						"remote port with WWPN 0x%Lx "
-						"connected to the adapter %s "
-						"is denied (%s rule %d)\n",
-						unit->fcp_lun,
-						unit->port->wwpn,
-						zfcp_get_busid_by_unit(unit),
-						zfcp_act_subtable_type[subtable],
-						rule);
-				break;
-			}
-		}
-		ZFCP_LOG_DEBUG("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_unit_access_denied(unit, 60, fsf_req);
+		if (header->fsf_status_qual.word[0])
+			dev_warn(&adapter->ccw_device->dev,
+				 "FCP-LUN 0x%Lx at the remote port "
+				 "with WWPN 0x%Lx "
+				 "connected to the adapter "
+				 "is already in use in LPAR%d, CSS%d.\n",
+				 unit->fcp_lun,
+				 unit->port->wwpn,
+				 queue_designator->hla,
+				 queue_designator->cssid);
+		else
+			zfcp_act_eval_err(adapter,
+					  header->fsf_status_qual.word[2]);
+		zfcp_erp_unit_access_denied(unit, 60, req);
 		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
 		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
-		ZFCP_LOG_INFO("error: The adapter ran out of resources. "
-			      "There is no handle (temporary port identifier) "
-			      "available for unit 0x%016Lx on port 0x%016Lx "
-			      "on adapter %s\n",
-			      unit->fcp_lun,
-			      unit->port->wwpn,
-			      zfcp_get_busid_by_unit(unit));
-		zfcp_erp_unit_failed(unit, 34, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		dev_warn(&adapter->ccw_device->dev,
+			 "The adapter ran out of resources. There is no "
+			 "handle available for unit 0x%016Lx on port 0x%016Lx.",
+			 unit->fcp_lun, unit->port->wwpn);
+		zfcp_erp_unit_failed(unit, 34, req);
+		/* fall through */
+	case FSF_INVALID_COMMAND_OPTION:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			/* Re-establish link to port */
 			zfcp_test_link(unit->port);
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
+			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			/* ERP strategy will escalate */
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
-		default:
-			ZFCP_LOG_NORMAL
-			    ("bug: Wrong status qualifier 0x%x arrived.\n",
-			     header->fsf_status_qual.word[0]);
 		}
 		break;
 
-	case FSF_INVALID_COMMAND_OPTION:
-		ZFCP_LOG_NORMAL(
-			"Invalid option 0x%x has been specified "
-			"in QTCB bottom sent to the adapter %s\n",
-			bottom->option,
-			zfcp_get_busid_by_adapter(adapter));
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		retval = -EINVAL;
-		break;
-
 	case FSF_GOOD:
-		/* save LUN handle assigned by FSF */
 		unit->handle = header->lun_handle;
-		ZFCP_LOG_TRACE("unit 0x%016Lx on remote port 0x%016Lx on "
-			       "adapter %s opened, port handle 0x%x\n",
-			       unit->fcp_lun,
-			       unit->port->wwpn,
-			       zfcp_get_busid_by_unit(unit),
-			       unit->handle);
-		/* mark unit as open */
 		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
 
 		if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
@@ -3168,1528 +1830,629 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
 		if (!readwrite) {
 			atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
 					&unit->status);
-			ZFCP_LOG_NORMAL("read-only access for unit "
-					"(adapter %s, wwpn=0x%016Lx, "
-					"fcp_lun=0x%016Lx)\n",
-					zfcp_get_busid_by_unit(unit),
-					unit->port->wwpn,
-					unit->fcp_lun);
+			dev_info(&adapter->ccw_device->dev,
+				 "Read-only access for unit 0x%016Lx "
+				 "on port 0x%016Lx.\n",
+				 unit->fcp_lun, unit->port->wwpn);
 		}
 
 		if (exclusive && !readwrite) {
-			ZFCP_LOG_NORMAL("exclusive access of read-only "
-					"unit not supported\n");
-			zfcp_erp_unit_failed(unit, 35, fsf_req);
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			zfcp_erp_unit_shutdown(unit, 0, 80, fsf_req);
+			dev_err(&adapter->ccw_device->dev,
+				"Exclusive access of read-only unit "
+				"0x%016Lx on port 0x%016Lx not "
+				"supported, disabling unit.\n",
+				unit->fcp_lun, unit->port->wwpn);
+			zfcp_erp_unit_failed(unit, 35, req);
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			zfcp_erp_unit_shutdown(unit, 0, 80, req);
 		} else if (!exclusive && readwrite) {
-			ZFCP_LOG_NORMAL("shared access of read-write "
-					"unit not supported\n");
-			zfcp_erp_unit_failed(unit, 36, fsf_req);
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			zfcp_erp_unit_shutdown(unit, 0, 81, fsf_req);
+			dev_err(&adapter->ccw_device->dev,
+				"Shared access of read-write unit "
+				"0x%016Lx on port 0x%016Lx not "
+				"supported, disabling unit.\n",
+				unit->fcp_lun, unit->port->wwpn);
+			zfcp_erp_unit_failed(unit, 36, req);
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			zfcp_erp_unit_shutdown(unit, 0, 81, req);
 		}
 	}
-
-		retval = 0;
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-				"(debug info 0x%x)\n",
-				header->fsf_status);
 		break;
 	}
 
- skip_fsfstatus:
+skip_fsfstatus:
 	atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
-	return retval;
 }

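For reference, the four combinations of the two access-mode flags checked above, and how the handler reacts; this is a summary of the hunk, not driver code (the flags themselves are derived from the FSF reply earlier in the handler, outside the lines shown here):

	/*
	 *   exclusive  readwrite   outcome
	 *   ---------  ---------   -------------------------------------------
	 *   yes        yes         normal operation
	 *   no         no          unit marked READONLY, info message only
	 *   yes        no          unsupported: unit failed (35), shutdown (80)
	 *   no         yes         unsupported: unit failed (36), shutdown (81)
	 */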
-/*
- * function:    zfcp_fsf_close_unit
- *
- * purpose:
- *
- * returns:	address of fsf_req - request successfully initiated
- *		NULL -
- *
- * assumptions: This routine does not check whether the associated
- *		remote port/lun has already been opened. This should be
- *		done by calling routines. Otherwise some status
- *		may be presented by FSF
- */
-int
-zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
+/**
+ * zfcp_fsf_open_unit - open unit
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 {
 	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long lock_flags;
-	int retval = 0;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(erp_action->adapter,
-				     FSF_QTCB_CLOSE_LUN,
-				     ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
-				     erp_action->adapter->pool.fsf_req_erp,
-				     &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Could not create close unit request for "
-			      "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
-			      erp_action->unit->fcp_lun,
-			      erp_action->port->wwpn,
-			      zfcp_get_busid_by_adapter(erp_action->adapter));
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;
+
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
+				  ZFCP_REQ_AUTO_CLEANUP,
+				  adapter->pool.fsf_req_erp);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
-	fsf_req->qtcb->header.port_handle = erp_action->port->handle;
-	fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
-	atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
-	fsf_req->data = (unsigned long) erp_action->unit;
-	fsf_req->erp_action = erp_action;
-	erp_action->fsf_req = fsf_req;
-
-	zfcp_erp_start_timer(fsf_req);
-	retval = zfcp_fsf_req_send(erp_action->fsf_req);
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
+	req->handler = zfcp_fsf_open_unit_handler;
+	req->data = erp_action->unit;
+	req->erp_action = erp_action;
+	erp_action->fsf_req = req;
+
+	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
+		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
+
+	atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
 	if (retval) {
-		ZFCP_LOG_INFO("error: Could not send a close unit request for "
-			      "unit 0x%016Lx on port 0x%016Lx onadapter %s.\n",
-			      erp_action->unit->fcp_lun,
-			      erp_action->port->wwpn,
-			      zfcp_get_busid_by_adapter(erp_action->adapter));
-		zfcp_fsf_req_free(fsf_req);
+		zfcp_fsf_req_free(req);
 		erp_action->fsf_req = NULL;
-		goto out;
 	}
-
-	ZFCP_LOG_TRACE("Close LUN request initiated (adapter %s, "
-		       "port 0x%016Lx, unit 0x%016Lx)\n",
-		       zfcp_get_busid_by_adapter(erp_action->adapter),
-		       erp_action->port->wwpn, erp_action->unit->fcp_lun);
- out:
-	write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
-				lock_flags);
+out:
+	spin_unlock(&adapter->req_q.lock);
 	return retval;
 }

-/*
- * function:    zfcp_fsf_close_unit_handler
- *
- * purpose:	is called for finished Close LUN FSF command
- *
- * returns:
- */
-static int
-zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
 {
-	int retval = -EINVAL;
-	struct zfcp_unit *unit;
-
-	unit = (struct zfcp_unit *) fsf_req->data;
+	struct zfcp_unit *unit = req->data;
 
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		/* don't change unit status in our bookkeeping */
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		goto skip_fsfstatus;
-	}
-
-	/* evaluate FSF status in QTCB */
-	switch (fsf_req->qtcb->header.fsf_status) {
 
+	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
-			      "0x%016Lx on adapter %s invalid. This may "
-			      "happen in rare circumstances\n",
-			      unit->port->handle,
-			      unit->port->wwpn,
-			      zfcp_get_busid_by_unit(unit));
-		ZFCP_LOG_DEBUG("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_LUN_HANDLE_NOT_VALID:
-		ZFCP_LOG_INFO("Temporary LUN identifier 0x%x of unit "
-			      "0x%016Lx on port 0x%016Lx on adapter %s is "
-			      "invalid. This may happen occasionally.\n",
-			      unit->handle,
-			      unit->fcp_lun,
-			      unit->port->wwpn,
-			      zfcp_get_busid_by_unit(unit));
-		ZFCP_LOG_DEBUG("Status qualifier data:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) &fsf_req->qtcb->header.fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_port_reopen(unit->port, 0, 111, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_erp_port_reopen(unit->port, 0, 111, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
-
 	case FSF_PORT_BOXED:
-		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
-			       "needs to be reopened\n",
-			       unit->port->wwpn,
-			       zfcp_get_busid_by_unit(unit));
-		zfcp_erp_port_boxed(unit->port, 52, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-				   ZFCP_STATUS_FSFREQ_RETRY;
+		zfcp_erp_port_boxed(unit->port, 52, req);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
+			       ZFCP_STATUS_FSFREQ_RETRY;
 		break;
-
 	case FSF_ADAPTER_STATUS_AVAILABLE:
-		switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
+		switch (req->qtcb->header.fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			/* re-establish link to port */
 			zfcp_test_link(unit->port);
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
+			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			/* ERP strategy will escalate */
-			fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-			break;
-		default:
-			ZFCP_LOG_NORMAL
-			    ("bug: Wrong status qualifier 0x%x arrived.\n",
-			     fsf_req->qtcb->header.fsf_status_qual.word[0]);
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
 		}
 		break;
-
 	case FSF_GOOD:
-		ZFCP_LOG_TRACE("unit 0x%016Lx on port 0x%016Lx on adapter %s "
-			       "closed, port handle 0x%x\n",
-			       unit->fcp_lun,
-			       unit->port->wwpn,
-			       zfcp_get_busid_by_unit(unit),
-			       unit->handle);
-		/* mark unit as closed */
 		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
-		retval = 0;
-		break;
-
-	default:
-		ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
-				"(debug info 0x%x)\n",
-				fsf_req->qtcb->header.fsf_status);
 		break;
 	}
-
- skip_fsfstatus:
+skip_fsfstatus:
 	atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
-	return retval;
 }

 /**
- * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
- * @adapter: adapter where scsi command is issued
- * @unit: unit where command is sent to
- * @scsi_cmnd: scsi command to be sent
- * @timer: timer to be started when request is initiated
- * @req_flags: flags for fsf_request
+ * zfcp_fsf_close_unit - close zfcp unit
+ * @erp_action: pointer to struct zfcp_unit
+ * Returns: 0 on success, error otherwise
  */
-int
-zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
-			       struct zfcp_unit *unit,
-			       struct scsi_cmnd * scsi_cmnd,
-			       int use_timer, int req_flags)
+int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 {
-	struct zfcp_fsf_req *fsf_req = NULL;
-	struct fcp_cmnd_iu *fcp_cmnd_iu;
-	unsigned int sbtype;
-	unsigned long lock_flags;
-	int real_bytes = 0;
-	int retval = 0;
-	int mask;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
-				     adapter->pool.fsf_req_scsi,
-				     &lock_flags, &fsf_req);
-	if (unlikely(retval < 0)) {
-		ZFCP_LOG_DEBUG("error: Could not create FCP command request "
-			       "for unit 0x%016Lx on port 0x%016Lx on "
-			       "adapter %s\n",
-			       unit->fcp_lun,
-			       unit->port->wwpn,
-			       zfcp_get_busid_by_adapter(adapter));
-		goto failed_req_create;
-	}
-
-	if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-				       &unit->status))) {
-		retval = -EBUSY;
-		goto unit_blocked;
-	}
-
-	zfcp_unit_get(unit);
-	fsf_req->unit = unit;
-
-	/* associate FSF request with SCSI request (for look up on abort) */
-	scsi_cmnd->host_scribble = (unsigned char *) fsf_req->req_id;
-
-	/* associate SCSI command with FSF request */
-	fsf_req->data = (unsigned long) scsi_cmnd;
-
-	/* set handles of unit and its parent port in QTCB */
-	fsf_req->qtcb->header.lun_handle = unit->handle;
-	fsf_req->qtcb->header.port_handle = unit->port->handle;
-
-	/* FSF does not define the structure of the FCP_CMND IU */
-	fcp_cmnd_iu = (struct fcp_cmnd_iu *)
-	    &(fsf_req->qtcb->bottom.io.fcp_cmnd);
-
-	/*
-	 * set depending on data direction:
-	 *      data direction bits in SBALE (SB Type)
-	 *      data direction bits in QTCB
-	 *      data direction bits in FCP_CMND IU
-	 */
-	switch (scsi_cmnd->sc_data_direction) {
-	case DMA_NONE:
-		fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
-		/*
-		 * FIXME(qdio):
-		 * what is the correct type for commands
-		 * without 'real' data buffers?
-		 */
-		sbtype = SBAL_FLAGS0_TYPE_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
-		sbtype = SBAL_FLAGS0_TYPE_READ;
-		fcp_cmnd_iu->rddata = 1;
-		break;
-	case DMA_TO_DEVICE:
-		fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
-		sbtype = SBAL_FLAGS0_TYPE_WRITE;
-		fcp_cmnd_iu->wddata = 1;
-		break;
-	case DMA_BIDIRECTIONAL:
-	default:
-		/*
-		 * dummy, catch this condition earlier
-		 * in zfcp_scsi_queuecommand
-		 */
-		goto failed_scsi_cmnd;
-	}
-
-	/* set FC service class in QTCB (3 per default) */
-	fsf_req->qtcb->bottom.io.service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
-
-	/* set FCP_LUN in FCP_CMND IU in QTCB */
-	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
-
-	mask = ZFCP_STATUS_UNIT_READONLY | ZFCP_STATUS_UNIT_SHARED;
-
-	/* set task attributes in FCP_CMND IU in QTCB */
-	if (likely((scsi_cmnd->device->simple_tags) ||
-		   (atomic_test_mask(mask, &unit->status))))
-		fcp_cmnd_iu->task_attribute = SIMPLE_Q;
-	else
-		fcp_cmnd_iu->task_attribute = UNTAGGED;
-
-	/* set additional length of FCP_CDB in FCP_CMND IU in QTCB, if needed */
-	if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) {
-		fcp_cmnd_iu->add_fcp_cdb_length
-		    = (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
-		ZFCP_LOG_TRACE("SCSI CDB length is 0x%x, "
-			       "additional FCP_CDB length is 0x%x "
-			       "(shifted right 2 bits)\n",
-			       scsi_cmnd->cmd_len,
-			       fcp_cmnd_iu->add_fcp_cdb_length);
-	}
-	/*
-	 * copy SCSI CDB (including additional length, if any) to
-	 * FCP_CDB in FCP_CMND IU in QTCB
-	 */
-	memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
-
-	/* FCP CMND IU length in QTCB */
-	fsf_req->qtcb->bottom.io.fcp_cmnd_length =
-		sizeof (struct fcp_cmnd_iu) +
-		fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t);
-
-	/* generate SBALEs from data buffer */
-	real_bytes = zfcp_qdio_sbals_from_scsicmnd(fsf_req, sbtype, scsi_cmnd);
-	if (unlikely(real_bytes < 0)) {
-		if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) {
-			ZFCP_LOG_DEBUG(
-				"Data did not fit into available buffer(s), "
-				"waiting for more...\n");
-			retval = -EIO;
-		} else {
-			ZFCP_LOG_NORMAL("error: No truncation implemented but "
-					"required. Shutting down unit "
-					"(adapter %s, port 0x%016Lx, "
-					"unit 0x%016Lx)\n",
-					zfcp_get_busid_by_unit(unit),
-					unit->port->wwpn,
-					unit->fcp_lun);
-			zfcp_erp_unit_shutdown(unit, 0, 131, fsf_req);
-			retval = -EINVAL;
-		}
-		goto no_fit;
-	}
-
-	/* set length of FCP data length in FCP_CMND IU in QTCB */
-	zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
-
-	ZFCP_LOG_DEBUG("Sending SCSI command:\n");
-	ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-		      (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
-
-	if (use_timer)
-		zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-
-	retval = zfcp_fsf_req_send(fsf_req);
-	if (unlikely(retval < 0)) {
-		ZFCP_LOG_INFO("error: Could not send FCP command request "
-			      "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      unit->port->wwpn,
-			      unit->fcp_lun);
-		goto send_failed;
-	}
-
-	ZFCP_LOG_TRACE("Send FCP Command initiated (adapter %s, "
-		       "port 0x%016Lx, unit 0x%016Lx)\n",
-		       zfcp_get_busid_by_adapter(adapter),
-		       unit->port->wwpn,
-		       unit->fcp_lun);
-	goto success;
-
- send_failed:
- no_fit:
- failed_scsi_cmnd:
-	zfcp_unit_put(unit);
- unit_blocked:
-	zfcp_fsf_req_free(fsf_req);
-	fsf_req = NULL;
-	scsi_cmnd->host_scribble = NULL;
- success:
- failed_req_create:
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
+	volatile struct qdio_buffer_element *sbale;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
+				  ZFCP_REQ_AUTO_CLEANUP,
+				  adapter->pool.fsf_req_erp);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	sbale = zfcp_qdio_sbale_req(req);
+	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->qtcb->header.lun_handle = erp_action->unit->handle;
+	req->handler = zfcp_fsf_close_unit_handler;
+	req->data = erp_action->unit;
+	req->erp_action = erp_action;
+	erp_action->fsf_req = req;
+	atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req = NULL;
+	}
+out:
+	spin_unlock(&adapter->req_q.lock);
 	return retval;
 }

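The deleted zfcp_fsf_send_fcp_command_task encoded the SCSI data direction three times over: in the SBALE type, in the QTCB, and in the FCP_CMND IU. The mapping, summarized from the removed switch (a reference table, not driver code):

	/*
	 * sc_data_direction   QTCB direction     SBALE type              FCP_CMND IU
	 * -----------------   ----------------   ----------------------  -----------
	 * DMA_NONE            FSF_DATADIR_CMND   SBAL_FLAGS0_TYPE_READ   (none)
	 * DMA_FROM_DEVICE     FSF_DATADIR_READ   SBAL_FLAGS0_TYPE_READ   rddata = 1
	 * DMA_TO_DEVICE       FSF_DATADIR_WRITE  SBAL_FLAGS0_TYPE_WRITE  wddata = 1
	 * DMA_BIDIRECTIONAL   rejected earlier, in zfcp_scsi_queuecommand
	 */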
-struct zfcp_fsf_req *
-zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
-					  struct zfcp_unit *unit,
-					  u8 tm_flags, int req_flags)
+static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
 {
-	struct zfcp_fsf_req *fsf_req = NULL;
-	int retval = 0;
-	struct fcp_cmnd_iu *fcp_cmnd_iu;
-	unsigned long lock_flags;
-	volatile struct qdio_buffer_element *sbale;
-
-	/* setup new FSF request */
-	retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
-				     adapter->pool.fsf_req_scsi,
-				     &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Could not create FCP command (task "
-			      "management) request for adapter %s, port "
-			      " 0x%016Lx, unit 0x%016Lx.\n",
-			      zfcp_get_busid_by_adapter(adapter),
-			      unit->port->wwpn, unit->fcp_lun);
-		goto out;
-	}
-
-	if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
-				       &unit->status)))
-		goto unit_blocked;
-
-	/*
-	 * Used to decide on proper handler in the return path,
-	 * could be either zfcp_fsf_send_fcp_command_task_handler or
-	 * zfcp_fsf_send_fcp_command_task_management_handler */
-
-	fsf_req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
-
-	/*
-	 * hold a pointer to the unit being target of this
-	 * task management request
-	 */
-	fsf_req->data = (unsigned long) unit;
-
-	/* set FSF related fields in QTCB */
-	fsf_req->qtcb->header.lun_handle = unit->handle;
-	fsf_req->qtcb->header.port_handle = unit->port->handle;
-	fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
-	fsf_req->qtcb->bottom.io.service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
-	fsf_req->qtcb->bottom.io.fcp_cmnd_length =
-		sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t);
-
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
-	/* set FCP related fields in FCP_CMND IU in QTCB */
-	fcp_cmnd_iu = (struct fcp_cmnd_iu *)
-		&(fsf_req->qtcb->bottom.io.fcp_cmnd);
-	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
-	fcp_cmnd_iu->task_management_flags = tm_flags;
-
-	zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
-	retval = zfcp_fsf_req_send(fsf_req);
-	if (!retval)
-		goto out;
-
- unit_blocked:
-	zfcp_fsf_req_free(fsf_req);
-	fsf_req = NULL;
-
- out:
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-	return fsf_req;
+	lat_rec->sum += lat;
+	lat_rec->min = min(lat_rec->min, lat);
+	lat_rec->max = max(lat_rec->max, lat);
 }

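A usage sketch for the latency bookkeeping above: each completed request folds its measured latency into running sum/min/max counters, so an average can be computed as sum divided by the request count (the counter lives in the surrounding struct latency_cont, incremented in zfcp_fsf_req_latency below). Values and the reset state of .min are illustrative assumptions, not taken from this hunk:

	struct fsf_latency_record rec = { .min = ~0U };	/* assumed reset state */

	zfcp_fsf_update_lat(&rec, 120);	/* sum=120, min=120, max=120 */
	zfcp_fsf_update_lat(&rec, 80);	/* sum=200, min=80,  max=120 */
	zfcp_fsf_update_lat(&rec, 250);	/* sum=450, min=80,  max=250 */
	/* mean latency so far: 450 / 3 = 150 units */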
-/*
- * function:    zfcp_fsf_send_fcp_command_handler
- *
- * purpose:	is called for finished Send FCP Command
- *
- * returns:
- */
-static int
-zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
 {
-	int retval = -EINVAL;
-	struct zfcp_unit *unit;
-	struct fsf_qtcb_header *header;
-	u16 subtable, rule, counter;
-
-	header = &fsf_req->qtcb->header;
-
-	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
-		unit = (struct zfcp_unit *) fsf_req->data;
-	else
-		unit = fsf_req->unit;
-
-	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
-		/* go directly to calls of special handlers */
-		goto skip_fsfstatus;
-	}
-
-	/* evaluate FSF status in QTCB */
-	switch (header->fsf_status) {
-
-	case FSF_PORT_HANDLE_NOT_VALID:
-		ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
-			      "0x%016Lx on adapter %s invalid\n",
-			      unit->port->handle,
-			      unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_LUN_HANDLE_NOT_VALID:
-		ZFCP_LOG_INFO("Temporary LUN identifier 0x%x for unit "
-			      "0x%016Lx on port 0x%016Lx on adapter %s is "
-			      "invalid. This may happen occasionally.\n",
-			      unit->handle,
-			      unit->fcp_lun,
-			      unit->port->wwpn,
-			      zfcp_get_busid_by_unit(unit));
-		ZFCP_LOG_NORMAL("Status qualifier data:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_port_reopen(unit->port, 0, 113, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_HANDLE_MISMATCH:
-		ZFCP_LOG_NORMAL("bug: The port handle 0x%x has changed "
-				"unexpectedly. (adapter %s, port 0x%016Lx, "
-				"unit 0x%016Lx)\n",
-				unit->port->handle,
-				zfcp_get_busid_by_unit(unit),
-				unit->port->wwpn,
-				unit->fcp_lun);
-		ZFCP_LOG_NORMAL("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_adapter_reopen(unit->port->adapter, 0, 114, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
-		ZFCP_LOG_INFO("error: adapter %s does not support fc "
-			      "class %d.\n",
-			      zfcp_get_busid_by_unit(unit),
-			      ZFCP_FC_SERVICE_CLASS_DEFAULT);
-		/* stop operation for this adapter */
-		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 132, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_FCPLUN_NOT_VALID:
-		ZFCP_LOG_NORMAL("bug: unit 0x%016Lx on port 0x%016Lx on "
-				"adapter %s does not have correct unit "
-				"handle 0x%x\n",
-				unit->fcp_lun,
-				unit->port->wwpn,
-				zfcp_get_busid_by_unit(unit),
-				unit->handle);
-		ZFCP_LOG_DEBUG("status qualifier:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (char *) &header->fsf_status_qual,
-			      sizeof (union fsf_status_qual));
-		zfcp_erp_port_reopen(unit->port, 0, 115, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_ACCESS_DENIED:
-		ZFCP_LOG_NORMAL("Access denied, cannot send FCP command to "
-				"unit 0x%016Lx on port 0x%016Lx on "
-				"adapter %s\n", unit->fcp_lun, unit->port->wwpn,
-				zfcp_get_busid_by_unit(unit));
-		for (counter = 0; counter < 2; counter++) {
-			subtable = header->fsf_status_qual.halfword[counter * 2];
-			rule = header->fsf_status_qual.halfword[counter * 2 + 1];
-			switch (subtable) {
-			case FSF_SQ_CFDC_SUBTABLE_OS:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
-			case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
-			case FSF_SQ_CFDC_SUBTABLE_LUN:
-				ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
-					      zfcp_act_subtable_type[subtable], rule);
-				break;
-			}
-		}
-		zfcp_erp_unit_access_denied(unit, 61, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_DIRECTION_INDICATOR_NOT_VALID:
-		ZFCP_LOG_INFO("bug: Invalid data direction given for unit "
-			      "0x%016Lx on port 0x%016Lx on adapter %s "
-			      "(debug info %d)\n",
-			      unit->fcp_lun,
-			      unit->port->wwpn,
-			      zfcp_get_busid_by_unit(unit),
-			      fsf_req->qtcb->bottom.io.data_direction);
-		/* stop operation for this adapter */
-		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_CMND_LENGTH_NOT_VALID:
-		ZFCP_LOG_NORMAL
-		    ("bug: An invalid control-data-block length field "
-		     "was found in a command for unit 0x%016Lx on port "
-		     "0x%016Lx on adapter %s " "(debug info %d)\n",
-		     unit->fcp_lun, unit->port->wwpn,
-		     zfcp_get_busid_by_unit(unit),
-		     fsf_req->qtcb->bottom.io.fcp_cmnd_length);
-		/* stop operation for this adapter */
-		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_PORT_BOXED:
-		ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
-			       "needs to be reopened\n",
-			       unit->port->wwpn, zfcp_get_busid_by_unit(unit));
-		zfcp_erp_port_boxed(unit->port, 53, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
-				   ZFCP_STATUS_FSFREQ_RETRY;
-		break;
-
-	case FSF_LUN_BOXED:
-		ZFCP_LOG_NORMAL("unit needs to be reopened (adapter %s, "
-				"wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n",
-				zfcp_get_busid_by_unit(unit),
-				unit->port->wwpn, unit->fcp_lun);
-		zfcp_erp_unit_boxed(unit, 54, fsf_req);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
-			| ZFCP_STATUS_FSFREQ_RETRY;
-		break;
-
-	case FSF_ADAPTER_STATUS_AVAILABLE:
-		switch (header->fsf_status_qual.word[0]) {
-		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			/* re-establish link to port */
-			zfcp_test_link(unit->port);
-			break;
-		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
-			/* FIXME(hw) need proper specs for proper action */
-			/* let scsi stack deal with retries and escalation */
-			break;
-		default:
-			ZFCP_LOG_NORMAL
-			    ("Unknown status qualifier 0x%x arrived.\n",
-			     header->fsf_status_qual.word[0]);
-			break;
-		}
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		break;
-
-	case FSF_GOOD:
-		break;
-
-	case FSF_FCP_RSP_AVAILABLE:
-		break;
-	}
-
- skip_fsfstatus:
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) {
-		retval =
-		    zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
-	} else {
-		retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
-		fsf_req->unit = NULL;
-		zfcp_unit_put(unit);
-	}
-	return retval;
+	struct fsf_qual_latency_info *lat_inf;
+	struct latency_cont *lat;
+	struct zfcp_unit *unit = req->unit;
+	unsigned long flags;
+
+	lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
+
+	switch (req->qtcb->bottom.io.data_direction) {
+	case FSF_DATADIR_READ:
+		lat = &unit->latencies.read;
+		break;
+	case FSF_DATADIR_WRITE:
+		lat = &unit->latencies.write;
+		break;
+	case FSF_DATADIR_CMND:
+		lat = &unit->latencies.cmd;
+		break;
+	default:
+		return;
+	}
+
+	spin_lock_irqsave(&unit->latencies.lock, flags);
+	zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
+	zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
+	lat->counter++;
+	spin_unlock_irqrestore(&unit->latencies.lock, flags);
 }

-/*
- * function:    zfcp_fsf_send_fcp_command_task_handler
- *
- * purpose:	evaluates FCP_RSP IU
- *
- * returns:
- */
-static int
-zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 {
-	int retval = 0;
-	struct scsi_cmnd *scpnt;
+	struct scsi_cmnd *scpnt = req->data;
 	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
-	    &(fsf_req->qtcb->bottom.io.fcp_rsp);
-	struct fcp_cmnd_iu *fcp_cmnd_iu = (struct fcp_cmnd_iu *)
-	    &(fsf_req->qtcb->bottom.io.fcp_cmnd);
+	    &(req->qtcb->bottom.io.fcp_rsp);
 	u32 sns_len;
-	char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
+	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
 	unsigned long flags;
-	struct zfcp_unit *unit = fsf_req->unit;
-
-	read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
-	scpnt = (struct scsi_cmnd *) fsf_req->data;
-	if (unlikely(!scpnt)) {
-		ZFCP_LOG_DEBUG
-		    ("Command with fsf_req %p is not associated to "
-		     "a scsi command anymore. Aborted?\n", fsf_req);
-		goto out;
-	}
-	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
-		/* FIXME: (design) mid-layer should handle DID_ABORT like
-		 * DID_SOFT_ERROR by retrying the request for devices
-		 * that allow retries.
-		 */
-		ZFCP_LOG_DEBUG("Setting DID_SOFT_ERROR and SUGGEST_RETRY\n");
-		set_host_byte(&scpnt->result, DID_SOFT_ERROR);
-		set_driver_byte(&scpnt->result, SUGGEST_RETRY);
+
+	if (unlikely(!scpnt))
+		return;
+
+	read_lock_irqsave(&req->adapter->abort_lock, flags);
+
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
+		set_host_byte(scpnt, DID_SOFT_ERROR);
+		set_driver_byte(scpnt, SUGGEST_RETRY);
 		goto skip_fsfstatus;
 	}
 
-	if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
-		ZFCP_LOG_DEBUG("Setting DID_ERROR\n");
-		set_host_byte(&scpnt->result, DID_ERROR);
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		set_host_byte(scpnt, DID_ERROR);
 		goto skip_fsfstatus;
 	}
 
-	/* set message byte of result in SCSI command */
-	scpnt->result |= COMMAND_COMPLETE << 8;
+	set_msg_byte(scpnt, COMMAND_COMPLETE);
 
-	/*
-	 * copy SCSI status code of FCP_STATUS of FCP_RSP IU to status byte
-	 * of result in SCSI command
-	 */
 	scpnt->result |= fcp_rsp_iu->scsi_status;
-	if (unlikely(fcp_rsp_iu->scsi_status)) {
-		/* DEBUG */
-		ZFCP_LOG_DEBUG("status for SCSI Command:\n");
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      scpnt->cmnd, scpnt->cmd_len);
-		ZFCP_LOG_DEBUG("SCSI status code 0x%x\n",
-			       fcp_rsp_iu->scsi_status);
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      (void *) fcp_rsp_iu, sizeof (struct fcp_rsp_iu));
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-			      zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu),
-			      fcp_rsp_iu->fcp_sns_len);
-	}
 
-	/* check FCP_RSP_INFO */
+	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
+		zfcp_fsf_req_latency(req);
+
 	if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
-		ZFCP_LOG_DEBUG("rsp_len is valid\n");
-		switch (fcp_rsp_info[3]) {
-		case RSP_CODE_GOOD:
-			/* ok, continue */
-			ZFCP_LOG_TRACE("no failure or Task Management "
-				       "Function complete\n");
-			set_host_byte(&scpnt->result, DID_OK);
-			break;
-		case RSP_CODE_LENGTH_MISMATCH:
-			/* hardware bug */
-			ZFCP_LOG_NORMAL("bug: FCP response code indictates "
-					"that the fibrechannel protocol data "
-					"length differs from the burst length. "
-					"The problem occured on unit 0x%016Lx "
-					"on port 0x%016Lx on adapter %s",
-					unit->fcp_lun,
-					unit->port->wwpn,
-					zfcp_get_busid_by_unit(unit));
-			/* dump SCSI CDB as prepared by zfcp */
-			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-				      (char *) &fsf_req->qtcb->
-				      bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
-			set_host_byte(&scpnt->result, DID_ERROR);
-			goto skip_fsfstatus;
-		case RSP_CODE_FIELD_INVALID:
-			/* driver or hardware bug */
-			ZFCP_LOG_NORMAL("bug: FCP response code indictates "
-					"that the fibrechannel protocol data "
-					"fields were incorrectly set up. "
-					"The problem occured on the unit "
-					"0x%016Lx on port 0x%016Lx on "
-					"adapter %s",
-					unit->fcp_lun,
-					unit->port->wwpn,
-					zfcp_get_busid_by_unit(unit));
-			/* dump SCSI CDB as prepared by zfcp */
-			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-				      (char *) &fsf_req->qtcb->
-				      bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
-			set_host_byte(&scpnt->result, DID_ERROR);
-			goto skip_fsfstatus;
-		case RSP_CODE_RO_MISMATCH:
-			/* hardware bug */
-			ZFCP_LOG_NORMAL("bug: The FCP response code indicates "
-					"that conflicting values for the "
-					"fibrechannel payload offset from the "
-					"header were found. "
-					"The problem occured on unit 0x%016Lx "
-					"on port 0x%016Lx on adapter %s.\n",
-					unit->fcp_lun,
-					unit->port->wwpn,
-					zfcp_get_busid_by_unit(unit));
-			/* dump SCSI CDB as prepared by zfcp */
-			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-				      (char *) &fsf_req->qtcb->
-				      bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
-			set_host_byte(&scpnt->result, DID_ERROR);
-			goto skip_fsfstatus;
-		default:
-			ZFCP_LOG_NORMAL("bug: An invalid FCP response "
-					"code was detected for a command. "
-					"The problem occured on the unit "
-					"0x%016Lx on port 0x%016Lx on "
-					"adapter %s (debug info 0x%x)\n",
-					unit->fcp_lun,
-					unit->port->wwpn,
-					zfcp_get_busid_by_unit(unit),
-					fcp_rsp_info[3]);
-			/* dump SCSI CDB as prepared by zfcp */
-			ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
-				      (char *) &fsf_req->qtcb->
-				      bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
-			set_host_byte(&scpnt->result, DID_ERROR);
+		if (fcp_rsp_info[3] == RSP_CODE_GOOD)
+			set_host_byte(scpnt, DID_OK);
+		else {
+			set_host_byte(scpnt, DID_ERROR);
 			goto skip_fsfstatus;
 		}
 	}
 
-	/* check for sense data */
 	if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
-		sns_len = FSF_FCP_RSP_SIZE -
-		    sizeof (struct fcp_rsp_iu) + fcp_rsp_iu->fcp_rsp_len;
-		ZFCP_LOG_TRACE("room for %i bytes sense data in QTCB\n",
-			       sns_len);
+		sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
+			  fcp_rsp_iu->fcp_rsp_len;
 		sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
-		ZFCP_LOG_TRACE("room for %i bytes sense data in SCSI command\n",
-			       SCSI_SENSE_BUFFERSIZE);
 		sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
-		ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n",
-			       scpnt->result);
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
-			      scpnt->cmnd, scpnt->cmd_len);
 
-		ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
-			       fcp_rsp_iu->fcp_sns_len);
 		memcpy(scpnt->sense_buffer,
 		       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
-		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
-			      (void *)scpnt->sense_buffer, sns_len);
-	}
-
-	/* check for overrun */
-	if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_over)) {
-		ZFCP_LOG_INFO("A data overrun was detected for a command. "
-			      "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
-			      "The response data length is "
-			      "%d, the original length was %d.\n",
-			      unit->fcp_lun,
-			      unit->port->wwpn,
-			      zfcp_get_busid_by_unit(unit),
-			      fcp_rsp_iu->fcp_resid,
-			      (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
 	}
 
-	/* check for underrun */
 	if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
-		ZFCP_LOG_INFO("A data underrun was detected for a command. "
-			      "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
-			      "The response data length is "
-			      "%d, the original length was %d.\n",
-			      unit->fcp_lun,
-			      unit->port->wwpn,
-			      zfcp_get_busid_by_unit(unit),
-			      fcp_rsp_iu->fcp_resid,
-			      (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
-
 		scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
 		if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
 		    scpnt->underflow)
-			set_host_byte(&scpnt->result, DID_ERROR);
+			set_host_byte(scpnt, DID_ERROR);
 	}
-
- skip_fsfstatus:
-	ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
-
+skip_fsfstatus:
 	if (scpnt->result != 0)
-		zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req);
+		zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
 	else if (scpnt->retries > 0)
-		zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req);
+		zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
 	else
-		zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req);
+		zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
 
-	/* cleanup pointer (need this especially for abort) */
 	scpnt->host_scribble = NULL;
-
-	/* always call back */
 	(scpnt->scsi_done) (scpnt);
-
 	/*
 	 * We must hold this lock until scsi_done has been called.
 	 * Otherwise we may call scsi_done after abort regarding this
 	 * command has completed.
 	 * Note: scsi_done must not block!
 	 */
- out:
-	read_unlock_irqrestore(&fsf_req->adapter->abort_lock, flags);
-	return retval;
+	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
 }

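A worked example of the sense-length clamp retained in the handler above: the copy length is the minimum of (a) the room left in the FSF response area after the fixed FCP_RSP IU header, (b) the SCSI midlayer's sense buffer, and (c) what the target actually sent. The concrete sizes below are illustrative assumptions, not values quoted from this patch:

	u32 sns_len;

	sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
		  fcp_rsp_iu->fcp_rsp_len;
	/* e.g. 128 - 24 + 8 = 112 bytes of room in the QTCB (sizes assumed) */
	sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
	/* capped at the midlayer buffer, commonly 96 bytes in this era */
	sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
	/* finally capped at what arrived, e.g. 18 bytes of fixed-format sense */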
-/*
- * function:    zfcp_fsf_send_fcp_command_task_management_handler
- *
- * purpose:	evaluates FCP_RSP IU
- *
- * returns:
- */
-static int
-zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
+static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
 {
-	int retval = 0;
 	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
-	    &(fsf_req->qtcb->bottom.io.fcp_rsp);
-	char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
-	struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
-
-	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
-		goto skip_fsfstatus;
-	}
+	    &(req->qtcb->bottom.io.fcp_rsp);
+	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
 
-	/* check FCP_RSP_INFO */
-	switch (fcp_rsp_info[3]) {
-	case RSP_CODE_GOOD:
-		/* ok, continue */
-		ZFCP_LOG_DEBUG("no failure or Task Management "
-			       "Function complete\n");
-		break;
-	case RSP_CODE_TASKMAN_UNSUPP:
-		ZFCP_LOG_NORMAL("bug: A reuested task management function "
-				"is not supported on the target device "
-				"unit 0x%016Lx, port 0x%016Lx, adapter %s\n ",
-				unit->fcp_lun,
-				unit->port->wwpn,
-				zfcp_get_busid_by_unit(unit));
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP;
-		break;
-	case RSP_CODE_TASKMAN_FAILED:
-		ZFCP_LOG_NORMAL("bug: A reuested task management function "
-				"failed to complete successfully. "
-				"unit 0x%016Lx, port 0x%016Lx, adapter %s.\n",
-				unit->fcp_lun,
-				unit->port->wwpn,
-				zfcp_get_busid_by_unit(unit));
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
-		break;
-	default:
-		ZFCP_LOG_NORMAL("bug: An invalid FCP response "
-				"code was detected for a command. "
-				"unit 0x%016Lx, port 0x%016Lx, adapter %s "
-				"(debug info 0x%x)\n",
-				unit->fcp_lun,
-				unit->port->wwpn,
-				zfcp_get_busid_by_unit(unit),
-				fcp_rsp_info[3]);
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
-	}
-
- skip_fsfstatus:
-	return retval;
+	if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
+	    (req->status & ZFCP_STATUS_FSFREQ_ERROR))
+		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
 }

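Worth noting as a design choice: the old task-management handler distinguished RSP_CODE_TASKMAN_UNSUPP (mapped to TMFUNCNOTSUPP) from RSP_CODE_TASKMAN_FAILED, each with its own log message. The replacement folds every non-good outcome into a single failure flag; the separate "unsupported" status is dropped deliberately. The whole evaluation reduces to one predicate:

	/* old: switch (fcp_rsp_info[3]) { GOOD -> ok; UNSUPP -> TMFUNCNOTSUPP;
	 *       FAILED and default -> TMFUNCFAILED }
	 * new: any non-good response code, or a request-level error, fails
	 * the task management function outright:
	 */
	if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
	    (req->status & ZFCP_STATUS_FSFREQ_ERROR))
		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;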
-/*
- * function:    zfcp_fsf_control_file
- *
- * purpose:     Initiator of the control file upload/download FSF requests
- *
- * returns:     0           - FSF request is successfuly created and queued
- *              -EOPNOTSUPP - The FCP adapter does not have Control File support
- *              -EINVAL     - Invalid direction specified
- *              -ENOMEM     - Insufficient memory
- *              -EPERM      - Cannot create FSF request or place it in QDIO queue
- */
-int
-zfcp_fsf_control_file(struct zfcp_adapter *adapter,
-		      struct zfcp_fsf_req **fsf_req_ptr,
-		      u32 fsf_command,
-		      u32 option,
-		      struct zfcp_sg_list *sg_list)
+static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
 {
-	struct zfcp_fsf_req *fsf_req;
-	struct fsf_qtcb_bottom_support *bottom;
-	volatile struct qdio_buffer_element *sbale;
-	unsigned long lock_flags;
-	int req_flags = 0;
-	int direction;
-	int retval = 0;
-
-	if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) {
-		ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		retval = -EOPNOTSUPP;
-		goto out;
-	}
-
-	switch (fsf_command) {
-
-	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
-		direction = SBAL_FLAGS0_TYPE_WRITE;
-		if ((option != FSF_CFDC_OPTION_FULL_ACCESS) &&
-		    (option != FSF_CFDC_OPTION_RESTRICTED_ACCESS))
-			req_flags = ZFCP_WAIT_FOR_SBAL;
-		break;
-
-	case FSF_QTCB_UPLOAD_CONTROL_FILE:
-		direction = SBAL_FLAGS0_TYPE_READ;
-		break;
-
-	default:
-		ZFCP_LOG_INFO("Invalid FSF command code 0x%08x\n", fsf_command);
-		retval = -EINVAL;
-		goto out;
-	}
-
-	retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags,
-				     NULL, &lock_flags, &fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("error: Could not create FSF request for the "
-			      "adapter %s\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		retval = -EPERM;
-		goto unlock_queue_lock;
-	}
-
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
-	sbale[0].flags |= direction;
-
-	bottom = &fsf_req->qtcb->bottom.support;
-	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
-	bottom->option = option;
-
-	if (sg_list->count > 0) {
-		int bytes;
-
-		bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction,
-						sg_list->sg, sg_list->count,
-						ZFCP_MAX_SBALS_PER_REQ);
-		if (bytes != ZFCP_CFDC_MAX_CONTROL_FILE_SIZE) {
-			ZFCP_LOG_INFO(
-				"error: Could not create sufficient number of "
-				"SBALS for an FSF request to the adapter %s\n",
-				zfcp_get_busid_by_adapter(adapter));
-			retval = -ENOMEM;
-			goto free_fsf_req;
-		}
-	} else
-		sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
-	zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
-	retval = zfcp_fsf_req_send(fsf_req);
-	if (retval < 0) {
-		ZFCP_LOG_INFO("initiation of cfdc up/download failed"
-			      "(adapter %s)\n",
-			      zfcp_get_busid_by_adapter(adapter));
-		retval = -EPERM;
-		goto free_fsf_req;
-	}
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
-
-	ZFCP_LOG_NORMAL("Control file %s FSF request has been sent to the "
-			"adapter %s\n",
-			fsf_command == FSF_QTCB_DOWNLOAD_CONTROL_FILE ?
-			"download" : "upload",
-			zfcp_get_busid_by_adapter(adapter));
-
-	wait_event(fsf_req->completion_wq,
-		   fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
-
-	*fsf_req_ptr = fsf_req;
-	goto out;
-
- free_fsf_req:
-	zfcp_fsf_req_free(fsf_req);
- unlock_queue_lock:
-	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
- out:
-	return retval;
-}
-
-
-/*
- * function:    zfcp_fsf_control_file_handler
+	struct zfcp_unit *unit;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
+		unit = req->data;
4268 * 2134 else
4269 * purpose: Handler of the control file upload/download FSF requests 2135 unit = req->unit;
4270 *
4271 * returns: 0 - FSF request successfuly processed
4272 * -EAGAIN - Operation has to be repeated because of a temporary problem
4273 * -EACCES - There is no permission to execute an operation
4274 * -EPERM - The control file is not in a right format
4275 * -EIO - There is a problem with the FCP adapter
4276 * -EINVAL - Invalid operation
4277 * -EFAULT - User space memory I/O operation fault
4278 */
4279static int
4280zfcp_fsf_control_file_handler(struct zfcp_fsf_req *fsf_req)
4281{
4282 struct zfcp_adapter *adapter = fsf_req->adapter;
4283 struct fsf_qtcb_header *header = &fsf_req->qtcb->header;
4284 struct fsf_qtcb_bottom_support *bottom = &fsf_req->qtcb->bottom.support;
4285 int retval = 0;
4286 2136
4287 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2137 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
4288 retval = -EINVAL;
4289 goto skip_fsfstatus; 2138 goto skip_fsfstatus;
4290 }
4291 2139
4292 switch (header->fsf_status) { 2140 switch (header->fsf_status) {
4293 2141 case FSF_HANDLE_MISMATCH:
4294 case FSF_GOOD: 2142 case FSF_PORT_HANDLE_NOT_VALID:
4295 ZFCP_LOG_NORMAL( 2143 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req);
4296 "The FSF request has been successfully completed " 2144 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4297 "on the adapter %s\n",
4298 zfcp_get_busid_by_adapter(adapter));
4299 break;
4300
4301 case FSF_OPERATION_PARTIALLY_SUCCESSFUL:
4302 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) {
4303 switch (header->fsf_status_qual.word[0]) {
4304
4305 case FSF_SQ_CFDC_HARDENED_ON_SE:
4306 ZFCP_LOG_NORMAL(
4307 "CFDC on the adapter %s has being "
4308 "hardened on primary and secondary SE\n",
4309 zfcp_get_busid_by_adapter(adapter));
4310 break;
4311
4312 case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE:
4313 ZFCP_LOG_NORMAL(
4314 "CFDC of the adapter %s could not "
4315 "be saved on the SE\n",
4316 zfcp_get_busid_by_adapter(adapter));
4317 break;
4318
4319 case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2:
4320 ZFCP_LOG_NORMAL(
4321 "CFDC of the adapter %s could not "
4322 "be copied to the secondary SE\n",
4323 zfcp_get_busid_by_adapter(adapter));
4324 break;
4325
4326 default:
4327 ZFCP_LOG_NORMAL(
4328 "CFDC could not be hardened "
4329 "on the adapter %s\n",
4330 zfcp_get_busid_by_adapter(adapter));
4331 }
4332 }
4333 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4334 retval = -EAGAIN;
4335 break;
4336
4337 case FSF_AUTHORIZATION_FAILURE:
4338 ZFCP_LOG_NORMAL(
4339 "Adapter %s does not accept privileged commands\n",
4340 zfcp_get_busid_by_adapter(adapter));
4341 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4342 retval = -EACCES;
4343 break; 2145 break;
4344 2146 case FSF_FCPLUN_NOT_VALID:
4345 case FSF_CFDC_ERROR_DETECTED: 2147 case FSF_LUN_HANDLE_NOT_VALID:
4346 ZFCP_LOG_NORMAL( 2148 zfcp_erp_port_reopen(unit->port, 0, 113, req);
4347 "Error at position %d in the CFDC, " 2149 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4348 "CFDC is discarded by the adapter %s\n",
4349 header->fsf_status_qual.word[0],
4350 zfcp_get_busid_by_adapter(adapter));
4351 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4352 retval = -EPERM;
4353 break; 2150 break;
4354 2151 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
4355 case FSF_CONTROL_FILE_UPDATE_ERROR: 2152 zfcp_fsf_class_not_supp(req);
4356 ZFCP_LOG_NORMAL(
4357 "Adapter %s cannot harden the control file, "
4358 "file is discarded\n",
4359 zfcp_get_busid_by_adapter(adapter));
4360 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4361 retval = -EIO;
4362 break; 2153 break;
4363 2154 case FSF_ACCESS_DENIED:
4364 case FSF_CONTROL_FILE_TOO_LARGE: 2155 zfcp_fsf_access_denied_unit(req, unit);
4365 ZFCP_LOG_NORMAL(
4366 "Control file is too large, file is discarded "
4367 "by the adapter %s\n",
4368 zfcp_get_busid_by_adapter(adapter));
4369 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4370 retval = -EIO;
4371 break; 2156 break;
4372 2157 case FSF_DIRECTION_INDICATOR_NOT_VALID:
4373 case FSF_ACCESS_CONFLICT_DETECTED: 2158 dev_err(&req->adapter->ccw_device->dev,
4374 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) 2159 "Invalid data direction (%d) given for unit "
4375 ZFCP_LOG_NORMAL( 2160 "0x%016Lx on port 0x%016Lx, shutting down "
4376 "CFDC has been discarded by the adapter %s, " 2161 "adapter.\n",
4377 "because activation would impact " 2162 req->qtcb->bottom.io.data_direction,
4378 "%d active connection(s)\n", 2163 unit->fcp_lun, unit->port->wwpn);
4379 zfcp_get_busid_by_adapter(adapter), 2164 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
4380 header->fsf_status_qual.word[0]); 2165 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4381 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4382 retval = -EIO;
4383 break; 2166 break;
4384 2167 case FSF_CMND_LENGTH_NOT_VALID:
4385 case FSF_CONFLICTS_OVERRULED: 2168 dev_err(&req->adapter->ccw_device->dev,
4386 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) 2169 "An invalid control-data-block length field (%d) "
4387 ZFCP_LOG_NORMAL( 2170 "was found in a command for unit 0x%016Lx on port "
4388 "CFDC has been activated on the adapter %s, " 2171 "0x%016Lx. Shutting down adapter.\n",
4389 "but activation has impacted " 2172 req->qtcb->bottom.io.fcp_cmnd_length,
4390 "%d active connection(s)\n", 2173 unit->fcp_lun, unit->port->wwpn);
4391 zfcp_get_busid_by_adapter(adapter), 2174 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
4392 header->fsf_status_qual.word[0]); 2175 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4393 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4394 retval = -EIO;
4395 break; 2176 break;
4396 2177 case FSF_PORT_BOXED:
4397 case FSF_UNKNOWN_OP_SUBTYPE: 2178 zfcp_erp_port_boxed(unit->port, 53, req);
4398 ZFCP_LOG_NORMAL("unknown operation subtype (adapter: %s, " 2179 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
4399 "op_subtype=0x%x)\n", 2180 ZFCP_STATUS_FSFREQ_RETRY;
4400 zfcp_get_busid_by_adapter(adapter),
4401 bottom->operation_subtype);
4402 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4403 retval = -EINVAL;
4404 break; 2181 break;
4405 2182 case FSF_LUN_BOXED:
4406 case FSF_INVALID_COMMAND_OPTION: 2183 zfcp_erp_unit_boxed(unit, 54, req);
4407 ZFCP_LOG_NORMAL( 2184 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
4408 "Invalid option 0x%x has been specified " 2185 ZFCP_STATUS_FSFREQ_RETRY;
4409 "in QTCB bottom sent to the adapter %s\n",
4410 bottom->option,
4411 zfcp_get_busid_by_adapter(adapter));
4412 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4413 retval = -EINVAL;
4414 break; 2186 break;
4415 2187 case FSF_ADAPTER_STATUS_AVAILABLE:
4416 default: 2188 if (header->fsf_status_qual.word[0] ==
4417 ZFCP_LOG_NORMAL( 2189 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
4418 "bug: An unknown/unexpected FSF status 0x%08x " 2190 zfcp_test_link(unit->port);
4419 "was presented on the adapter %s\n", 2191 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4420 header->fsf_status,
4421 zfcp_get_busid_by_adapter(adapter));
4422 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4423 retval = -EINVAL;
4424 break; 2192 break;
4425 } 2193 }
4426
4427skip_fsfstatus: 2194skip_fsfstatus:
4428 return retval; 2195 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
4429} 2196 zfcp_fsf_send_fcp_ctm_handler(req);
4430 2197 else {
4431static inline int 2198 zfcp_fsf_send_fcp_command_task_handler(req);
4432zfcp_fsf_req_sbal_check(unsigned long *flags, 2199 req->unit = NULL;
4433 struct zfcp_qdio_queue *queue, int needed) 2200 zfcp_unit_put(unit);
4434{
4435 write_lock_irqsave(&queue->queue_lock, *flags);
4436 if (likely(atomic_read(&queue->free_count) >= needed))
4437 return 1;
4438 write_unlock_irqrestore(&queue->queue_lock, *flags);
4439 return 0;
4440}
4441
4442/*
4443 * set qtcb pointer in fsf_req and initialize QTCB
4444 */
4445static void
4446zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4447{
4448 if (likely(fsf_req->qtcb != NULL)) {
4449 fsf_req->qtcb->prefix.req_seq_no =
4450 fsf_req->adapter->fsf_req_seq_no;
4451 fsf_req->qtcb->prefix.req_id = fsf_req->req_id;
4452 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4453 fsf_req->qtcb->prefix.qtcb_type =
4454 fsf_qtcb_type[fsf_req->fsf_command];
4455 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4456 fsf_req->qtcb->header.req_handle = fsf_req->req_id;
4457 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4458 } 2201 }
4459} 2202}
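
The FSF status dispatch above encodes an escalation policy: an invalid LUN handle triggers a port reopen, an invalid port handle an adapter reopen, and a malformed request an adapter shutdown, because each kind of state is owned one level up. A condensed sketch of that idea (status values and names are illustrative, not the driver's; the authoritative mapping is the switch above):

    /* Illustrative status values; the real FSF codes live in zfcp_fsf.h. */
    enum fsf_status_sketch {
    	STATUS_LUN_HANDLE_BAD,
    	STATUS_PORT_HANDLE_BAD,
    	STATUS_REQUEST_MALFORMED,
    	STATUS_GOOD,
    };

    /* Recovery scopes, ordered from least to most disruptive. */
    enum erp_scope { ERP_NONE, ERP_REOPEN_PORT, ERP_REOPEN_ADAPTER, ERP_SHUTDOWN };

    enum erp_scope erp_for(enum fsf_status_sketch s)
    {
    	switch (s) {
    	case STATUS_LUN_HANDLE_BAD:	/* LUN handles are per-port state */
    		return ERP_REOPEN_PORT;
    	case STATUS_PORT_HANDLE_BAD:	/* port handles are per-adapter state */
    		return ERP_REOPEN_ADAPTER;
    	case STATUS_REQUEST_MALFORMED:	/* driver bug: stop using the adapter */
    		return ERP_SHUTDOWN;
    	default:
    		return ERP_NONE;
    	}
    }
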
 
 /**
- * zfcp_fsf_req_sbal_get - try to get one SBAL in the request queue
- * @adapter: adapter for which request queue is examined
- * @req_flags: flags indicating whether to wait for needed SBAL or not
- * @lock_flags: lock_flags if queue_lock is taken
- * Return: 0 on success, otherwise -EIO, or -ERESTARTSYS
- * Locks: lock adapter->request_queue->queue_lock on success
- */
-static int
-zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags,
-		      unsigned long *lock_flags)
-{
-	long ret;
-	struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
-
-	if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
-		ret = wait_event_interruptible_timeout(adapter->request_wq,
-			zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1),
-			ZFCP_SBAL_TIMEOUT);
-		if (ret < 0)
-			return ret;
-		if (!ret)
-			return -EIO;
-	} else if (!zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1))
-		return -EIO;
-
-	return 0;
-}
-
-/*
- * function:    zfcp_fsf_req_create
- *
- * purpose:     create an FSF request at the specified adapter and
- *              setup common fields
- *
- * returns:     -ENOMEM if there was insufficient memory for a request
- *              -EIO if no qdio buffers could be allocated to the request
- *              -EINVAL/-EPERM on bug conditions in req_dequeue
- *              0 on success
- *
- * note:        The created request is returned by reference.
- *
- * locks:       lock of concerned request queue must not be held,
- *              but is held on completion (write, irqsave)
+ * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
+ * @adapter: adapter where scsi command is issued
+ * @unit: unit where command is sent to
+ * @scsi_cmnd: scsi command to be sent
+ * @use_timer: start the request timer when the request is initiated
+ * @req_flags: flags for fsf_request
  */
-int
-zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
-		    mempool_t *pool, unsigned long *lock_flags,
-		    struct zfcp_fsf_req **fsf_req_p)
+int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
+				   struct zfcp_unit *unit,
+				   struct scsi_cmnd *scsi_cmnd,
+				   int use_timer, int req_flags)
 {
-	volatile struct qdio_buffer_element *sbale;
-	struct zfcp_fsf_req *fsf_req = NULL;
-	int ret = 0;
-	struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
-
-	/* allocate new FSF request */
-	fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
-	if (unlikely(NULL == fsf_req)) {
-		ZFCP_LOG_DEBUG("error: Could not put an FSF request into "
-			       "the outbound (send) queue.\n");
-		ret = -ENOMEM;
-		goto failed_fsf_req;
-	}
-
-	fsf_req->adapter = adapter;
-	fsf_req->fsf_command = fsf_cmd;
-	INIT_LIST_HEAD(&fsf_req->list);
-	init_timer(&fsf_req->timer);
+	struct zfcp_fsf_req *req;
+	struct fcp_cmnd_iu *fcp_cmnd_iu;
+	unsigned int sbtype;
+	int real_bytes, retval = -EIO;
 
-	/* initialize waitqueue which may be used to wait on
-	   this request completion */
-	init_waitqueue_head(&fsf_req->completion_wq);
+	if (unlikely(!(atomic_read(&unit->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		return -EBUSY;
 
-	ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags);
-	if (ret < 0)
-		goto failed_sbals;
+	spin_lock(&adapter->req_q.lock);
+	if (!atomic_read(&adapter->req_q.count))
+		goto out;
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
+				  adapter->pool.fsf_req_scsi);
+	if (unlikely(IS_ERR(req))) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
 
-	/* this is serialized (we are holding req_queue-lock of adapter) */
-	if (adapter->req_no == 0)
-		adapter->req_no++;
-	fsf_req->req_id = adapter->req_no++;
+	zfcp_unit_get(unit);
+	req->unit = unit;
+	req->data = scsi_cmnd;
+	req->handler = zfcp_fsf_send_fcp_command_handler;
+	req->qtcb->header.lun_handle = unit->handle;
+	req->qtcb->header.port_handle = unit->port->handle;
+	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
 
-	zfcp_fsf_req_qtcb_init(fsf_req);
+	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
 
+	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
+	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
 	/*
-	 * We hold queue_lock here. Check if QDIOUP is set and let request fail
-	 * if it is not set (see also *_open_qdio and *_close_qdio).
+	 * set depending on data direction:
+	 *      data direction bits in SBALE (SB Type)
+	 *      data direction bits in QTCB
+	 *      data direction bits in FCP_CMND IU
 	 */
-
-	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
-		write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
-		ret = -EIO;
-		goto failed_sbals;
+	switch (scsi_cmnd->sc_data_direction) {
+	case DMA_NONE:
+		req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
+		sbtype = SBAL_FLAGS0_TYPE_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
+		sbtype = SBAL_FLAGS0_TYPE_READ;
+		fcp_cmnd_iu->rddata = 1;
+		break;
+	case DMA_TO_DEVICE:
+		req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
+		sbtype = SBAL_FLAGS0_TYPE_WRITE;
+		fcp_cmnd_iu->wddata = 1;
+		break;
+	case DMA_BIDIRECTIONAL:
+	default:
+		retval = -EIO;
+		goto failed_scsi_cmnd;
 	}
 
-	if (fsf_req->qtcb) {
-		fsf_req->seq_no = adapter->fsf_req_seq_no;
-		fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
-	}
-	fsf_req->sbal_number = 1;
-	fsf_req->sbal_first = req_queue->free_index;
-	fsf_req->sbal_curr = req_queue->free_index;
-	fsf_req->sbale_curr = 1;
+	if (likely((scsi_cmnd->device->simple_tags) ||
+		   ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
+		    (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
+		fcp_cmnd_iu->task_attribute = SIMPLE_Q;
+	else
+		fcp_cmnd_iu->task_attribute = UNTAGGED;
 
-	if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) {
-		fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	}
+	if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
+		fcp_cmnd_iu->add_fcp_cdb_length =
+			(scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
 
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
 
-	/* setup common SBALE fields */
-	sbale[0].addr = (void *) fsf_req->req_id;
-	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
-	if (likely(fsf_req->qtcb != NULL)) {
-		sbale[1].addr = (void *) fsf_req->qtcb;
-		sbale[1].length = sizeof(struct fsf_qtcb);
+	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
+		fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t);
+
+	real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
+					     scsi_sglist(scsi_cmnd),
+					     FSF_MAX_SBALS_PER_REQ);
+	if (unlikely(real_bytes < 0)) {
+		if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
+			retval = -EIO;
+		else {
+			dev_err(&adapter->ccw_device->dev,
+				"SCSI request too large. "
+				"Shutting down unit 0x%016Lx on port "
+				"0x%016Lx.\n", unit->fcp_lun,
+				unit->port->wwpn);
+			zfcp_erp_unit_shutdown(unit, 0, 131, req);
+			retval = -EINVAL;
+		}
+		goto failed_scsi_cmnd;
 	}
 
-	ZFCP_LOG_TRACE("got %i free BUFFERs starting at index %i\n",
-		       fsf_req->sbal_number, fsf_req->sbal_first);
+	zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
 
-	goto success;
+	if (use_timer)
+		zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 
- failed_sbals:
-	/* dequeue new FSF request previously enqueued */
-	zfcp_fsf_req_free(fsf_req);
-	fsf_req = NULL;
+	retval = zfcp_fsf_req_send(req);
+	if (unlikely(retval))
+		goto failed_scsi_cmnd;
 
- failed_fsf_req:
-	write_lock_irqsave(&req_queue->queue_lock, *lock_flags);
- success:
-	*fsf_req_p = fsf_req;
-	return ret;
+	goto out;
+
+failed_scsi_cmnd:
+	zfcp_unit_put(unit);
+	zfcp_fsf_req_free(req);
+	scsi_cmnd->host_scribble = NULL;
+out:
+	spin_unlock(&adapter->req_q.lock);
+	return retval;
 }
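
Two encodings in the function above are worth spelling out: the transfer direction must agree in three places (SBALE type, QTCB direction field, FCP_CMND IU bits), and a SCSI CDB longer than the 16-byte FCP field spills into the additional-CDB area, whose length is recorded in 4-byte words, hence the `>> 2`. A self-contained sketch of both encodings (structure and names are illustrative, not the driver's):

    #include <stdint.h>
    #include <string.h>

    enum dma_dir { DIR_NONE, DIR_FROM_DEVICE, DIR_TO_DEVICE };

    #define FCP_CDB_LENGTH 16

    struct fcp_cmnd_sketch {
    	uint8_t add_cdb_len;		/* in 4-byte words, as in FCP */
    	uint8_t rddata:1;
    	uint8_t wrdata:1;
    	uint8_t cdb[FCP_CDB_LENGTH + 48]; /* fixed CDB plus additional area */
    };

    int encode_cmnd(struct fcp_cmnd_sketch *iu, enum dma_dir dir,
    		const uint8_t *cdb, size_t cdb_len)
    {
    	if (cdb_len > sizeof(iu->cdb))
    		return -1;

    	/* direction bits in the IU must match the SBAL/QTCB setup */
    	iu->rddata = (dir == DIR_FROM_DEVICE);
    	iu->wrdata = (dir == DIR_TO_DEVICE);

    	/* bytes beyond the fixed 16 go to the additional CDB area;
    	 * its length is counted in 4-byte words */
    	if (cdb_len > FCP_CDB_LENGTH)
    		iu->add_cdb_len = (cdb_len - FCP_CDB_LENGTH) >> 2;

    	memcpy(iu->cdb, cdb, cdb_len);	/* fixed and additional areas are adjacent */
    	return 0;
    }
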
 
-/*
- * function:    zfcp_fsf_req_send
- *
- * purpose:     start transfer of FSF request via QDIO
- *
- * returns:     0  - request transfer successfully started
- *              !0 - start of request transfer failed
- */
-static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
-{
-	struct zfcp_adapter *adapter;
-	struct zfcp_qdio_queue *req_queue;
-	volatile struct qdio_buffer_element *sbale;
-	int inc_seq_no;
-	int new_distance_from_int;
-	int retval = 0;
-
-	adapter = fsf_req->adapter;
-	req_queue = &adapter->request_queue;
-
-	/* FIXME(debug): remove it later */
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_first, 0);
-	ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags);
-	ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n");
-	ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
-		      sbale[1].length);
-
-	/* put allocated FSF request into hash table */
-	spin_lock(&adapter->req_list_lock);
-	zfcp_reqlist_add(adapter, fsf_req);
-	spin_unlock(&adapter->req_list_lock);
-
-	inc_seq_no = (fsf_req->qtcb != NULL);
-
-	ZFCP_LOG_TRACE("request queue of adapter %s: "
-		       "next free SBAL is %i, %i free SBALs\n",
-		       zfcp_get_busid_by_adapter(adapter),
-		       req_queue->free_index,
-		       atomic_read(&req_queue->free_count));
-
-	ZFCP_LOG_DEBUG("calling do_QDIO adapter %s, flags=0x%x, queue_no=%i, "
-		       "index_in_queue=%i, count=%i, buffers=%p\n",
-		       zfcp_get_busid_by_adapter(adapter),
-		       QDIO_FLAG_SYNC_OUTPUT,
-		       0, fsf_req->sbal_first, fsf_req->sbal_number,
-		       &req_queue->buffer[fsf_req->sbal_first]);
-
-	/*
-	 * adjust the number of free SBALs in request queue as well as
-	 * position of first one
-	 */
-	atomic_sub(fsf_req->sbal_number, &req_queue->free_count);
-	ZFCP_LOG_TRACE("free_count=%d\n", atomic_read(&req_queue->free_count));
-	req_queue->free_index += fsf_req->sbal_number;	  /* increase */
-	req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;  /* wrap if needed */
-	new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
-
-	fsf_req->issued = get_clock();
-
-	retval = do_QDIO(adapter->ccw_device,
-			 QDIO_FLAG_SYNC_OUTPUT,
-			 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
-
-	if (unlikely(retval)) {
-		/* Queues are down..... */
-		retval = -EIO;
-		del_timer(&fsf_req->timer);
-		spin_lock(&adapter->req_list_lock);
-		zfcp_reqlist_remove(adapter, fsf_req);
-		spin_unlock(&adapter->req_list_lock);
-		/* undo changes in request queue made for this request */
-		zfcp_qdio_zero_sbals(req_queue->buffer,
-				     fsf_req->sbal_first, fsf_req->sbal_number);
-		atomic_add(fsf_req->sbal_number, &req_queue->free_count);
-		req_queue->free_index -= fsf_req->sbal_number;
-		req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
-		req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
-		zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req);
-	} else {
-		req_queue->distance_from_int = new_distance_from_int;
-		/*
-		 * increase FSF sequence counter -
-		 * this must only be done for requests successfully enqueued
-		 * to QDIO; rejected requests may otherwise be cleaned up by
-		 * calling routines, resulting in missing sequence counter
-		 * values
-		 */
-
-		/* Don't increase for unsolicited status */
-		if (inc_seq_no)
-			adapter->fsf_req_seq_no++;
-
-		/* count FSF requests pending */
-		atomic_inc(&adapter->reqs_active);
-	}
-	return retval;
-}
-
-#undef ZFCP_LOG_AREA
+/**
+ * zfcp_fsf_send_fcp_ctm - send SCSI task management command
+ * @adapter: pointer to struct zfcp_adapter
+ * @unit: pointer to struct zfcp_unit
+ * @tm_flags: unsigned byte for task management flags
+ * @req_flags: int request flags
+ * Returns: on success pointer to struct fsf_req, NULL otherwise
+ */
+struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
+					   struct zfcp_unit *unit,
+					   u8 tm_flags, int req_flags)
+{
+	volatile struct qdio_buffer_element *sbale;
+	struct zfcp_fsf_req *req = NULL;
+	struct fcp_cmnd_iu *fcp_cmnd_iu;
+
+	if (unlikely(!(atomic_read(&unit->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		return NULL;
+
+	spin_lock(&adapter->req_q.lock);
+	if (!atomic_read(&adapter->req_q.count))
+		goto out;
+	req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
+				  adapter->pool.fsf_req_scsi);
+	if (unlikely(IS_ERR(req)))
+		goto out;
+
+	req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
+	req->data = unit;
+	req->handler = zfcp_fsf_send_fcp_command_handler;
+	req->qtcb->header.lun_handle = unit->handle;
+	req->qtcb->header.port_handle = unit->port->handle;
+	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
+	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
+		sizeof(fcp_dl_t);
+
+	sbale = zfcp_qdio_sbale_req(req);
+	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
+	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
+	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
+	fcp_cmnd_iu->task_management_flags = tm_flags;
+
+	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+	if (!zfcp_fsf_req_send(req))
+		goto out;
+
+	zfcp_fsf_req_free(req);
+	req = NULL;
+out:
+	spin_unlock(&adapter->req_q.lock);
+	return req;
+}
+
+static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
+{
+	if (req->qtcb->header.fsf_status != FSF_GOOD)
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
+
+/**
+ * zfcp_fsf_control_file - control file upload/download
+ * @adapter: pointer to struct zfcp_adapter
+ * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
+ * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
+ */
+struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
+					   struct zfcp_fsf_cfdc *fsf_cfdc)
+{
+	volatile struct qdio_buffer_element *sbale;
+	struct zfcp_fsf_req *req = NULL;
+	struct fsf_qtcb_bottom_support *bottom;
+	int direction, retval = -EIO, bytes;
+
+	if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	switch (fsf_cfdc->command) {
+	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
+		direction = SBAL_FLAGS0_TYPE_WRITE;
+		break;
+	case FSF_QTCB_UPLOAD_CONTROL_FILE:
+		direction = SBAL_FLAGS0_TYPE_READ;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock(&adapter->req_q.lock);
+	if (zfcp_fsf_req_sbal_get(adapter))
+		goto out;
+
+	req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
+	if (unlikely(IS_ERR(req))) {
+		retval = -EPERM;
+		goto out;
+	}
+
+	req->handler = zfcp_fsf_control_file_handler;
+
+	sbale = zfcp_qdio_sbale_req(req);
+	sbale[0].flags |= direction;
+
+	bottom = &req->qtcb->bottom.support;
+	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
+	bottom->option = fsf_cfdc->option;
+
+	bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
+					FSF_MAX_SBALS_PER_REQ);
+	if (bytes != ZFCP_CFDC_MAX_SIZE) {
+		retval = -ENOMEM;
+		zfcp_fsf_req_free(req);
+		goto out;
+	}
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+out:
+	spin_unlock(&adapter->req_q.lock);
+
+	if (!retval) {
+		wait_event(req->completion_wq,
+			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+		return req;
+	}
+	return ERR_PTR(retval);
+}
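
The rewritten initiators return errors through the kernel's ERR_PTR/IS_ERR/PTR_ERR convention instead of an out-parameter plus status code, so one pointer carries either a request or an errno. A minimal sketch of the pattern (the macros come from <linux/err.h>; the structure is illustrative):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct request_sketch { int id; };

    /* allocate a request, or encode the failure reason in the pointer */
    static struct request_sketch *make_request(int id)
    {
    	struct request_sketch *r;

    	if (id < 0)
    		return ERR_PTR(-EINVAL);	/* errno travels in the pointer */

    	r = kzalloc(sizeof(*r), GFP_KERNEL);
    	if (!r)
    		return ERR_PTR(-ENOMEM);

    	r->id = id;
    	return r;
    }

    static int use_request(int id)
    {
    	struct request_sketch *r = make_request(id);

    	if (IS_ERR(r))
    		return PTR_ERR(r);	/* decode the errno, no NULL checks */
    	/* ... use r ... */
    	kfree(r);
    	return 0;
    }
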
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 099970b27001..bf94b4da0763 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -1,27 +1,16 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Interface to the FSF support functions.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #ifndef FSF_H
 #define FSF_H
 
+#include <linux/pfn.h>
+
 #define FSF_QTCB_CURRENT_VERSION	0x00000001
 
 /* FSF commands */
@@ -258,6 +247,16 @@
 #define FSF_UNIT_ACCESS_EXCLUSIVE		0x02000000
 #define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER	0x10000000
 
+/* FSF interface for CFDC */
+#define ZFCP_CFDC_MAX_SIZE		127 * 1024
+#define ZFCP_CFDC_PAGES 		PFN_UP(ZFCP_CFDC_MAX_SIZE)
+
+struct zfcp_fsf_cfdc {
+	struct scatterlist sg[ZFCP_CFDC_PAGES];
+	u32 command;
+	u32 option;
+};
+
 struct fsf_queue_designator {
 	u8  cssid;
 	u8  chpid;
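
ZFCP_CFDC_PAGES sizes the scatterlist by rounding the 127 KiB control-file maximum up to whole pages; the kernel's PFN_UP(x) is essentially (x + PAGE_SIZE - 1) >> PAGE_SHIFT. A small illustration of the arithmetic (user-space stand-ins for the kernel macros):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)	/* 4 KiB, as on s390 */

    /* user-space stand-in for the kernel's PFN_UP() */
    #define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    #define ZFCP_CFDC_MAX_SIZE (127 * 1024)

    int main(void)
    {
    	/* 127 KiB / 4 KiB = 31.75, rounded up to 32 pages */
    	printf("CFDC needs %lu scatterlist entries\n",
    	       PFN_UP(ZFCP_CFDC_MAX_SIZE));
    	return 0;
    }
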
@@ -288,6 +287,18 @@ struct fsf_bit_error_payload {
 	u32 current_transmit_b2b_credit;
 } __attribute__ ((packed));
 
+struct fsf_link_down_info {
+	u32 error_code;
+	u32 res1;
+	u8 res2[2];
+	u8 primary_status;
+	u8 ioerr_code;
+	u8 action_code;
+	u8 reason_code;
+	u8 explanation_code;
+	u8 vendor_specific_code;
+} __attribute__ ((packed));
+
 struct fsf_status_read_buffer {
 	u32 status_type;
 	u32 status_subtype;
@@ -298,7 +309,12 @@ struct fsf_status_read_buffer {
 	u32 class;
 	u64 fcp_lun;
 	u8  res3[24];
-	u8  payload[FSF_STATUS_READ_PAYLOAD_SIZE];
+	union {
+		u8  data[FSF_STATUS_READ_PAYLOAD_SIZE];
+		u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
+		struct fsf_link_down_info link_down_info;
+		struct fsf_bit_error_payload bit_error;
+	} payload;
 } __attribute__ ((packed));
 
 struct fsf_qual_version_error {
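
The payload union gives each consumer of an unsolicited status a typed view of the same bytes, replacing casts of a raw u8 array; status_type selects which view is valid. A reduced sketch of the idiom (two variants only, illustrative field names):

    #include <stdint.h>
    #include <stdio.h>

    struct link_down_view { uint32_t error_code; uint8_t reason; };
    struct bit_error_view { uint32_t rx_errors; uint32_t tx_errors; };

    struct status_buffer {
    	uint32_t status_type;	/* selects the valid payload view */
    	union {
    		uint8_t data[64];
    		struct link_down_view link_down;
    		struct bit_error_view bit_error;
    	} payload;
    };

    enum { STATUS_LINK_DOWN = 1, STATUS_BIT_ERROR = 2 };

    void handle(const struct status_buffer *sb)
    {
    	switch (sb->status_type) {
    	case STATUS_LINK_DOWN:	/* typed access, no casts */
    		printf("link down, reason %u\n", sb->payload.link_down.reason);
    		break;
    	case STATUS_BIT_ERROR:
    		printf("bit errors: rx=%u\n", sb->payload.bit_error.rx_errors);
    		break;
    	}
    }
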
@@ -311,23 +327,19 @@ struct fsf_qual_sequence_error {
 	u32 res1[3];
 } __attribute__ ((packed));
 
-struct fsf_link_down_info {
-	u32 error_code;
-	u32 res1;
-	u8 res2[2];
-	u8 primary_status;
-	u8 ioerr_code;
-	u8 action_code;
-	u8 reason_code;
-	u8 explanation_code;
-	u8 vendor_specific_code;
+struct fsf_qual_latency_info {
+	u32 channel_lat;
+	u32 fabric_lat;
+	u8 res1[8];
 } __attribute__ ((packed));
 
 union fsf_prot_status_qual {
+	u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)];
 	u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
 	struct fsf_qual_version_error version_error;
 	struct fsf_qual_sequence_error sequence_error;
 	struct fsf_link_down_info link_down_info;
+	struct fsf_qual_latency_info latency_info;
 } __attribute__ ((packed));
 
 struct fsf_qtcb_prefix {
@@ -437,7 +449,9 @@ struct fsf_qtcb_bottom_config {
 	u32 fc_link_speed;
 	u32 adapter_type;
 	u32 peer_d_id;
-	u8 res2[12];
+	u8 res1[2];
+	u16 timer_interval;
+	u8 res2[8];
 	u32 s_id;
 	struct fsf_nport_serv_param nport_serv_param;
 	u8 reserved_nport_serv_param[16];
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 8ca5f074c687..d6dbd653fde9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -1,241 +1,101 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
  *
- * (C) Copyright IBM Corp. 2002, 2006
+ * Setup and helper functions to access QDIO.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corporation 2002, 2008
  */
 
 #include "zfcp_ext.h"
 
-static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
-static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
-	(struct zfcp_qdio_queue *, int, int);
-static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
-	(struct zfcp_fsf_req *, int, int);
-static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
-	(struct zfcp_fsf_req *, unsigned long);
-static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
-	(struct zfcp_fsf_req *, unsigned long);
-static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
-static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
-static void zfcp_qdio_sbale_fill
-	(struct zfcp_fsf_req *, unsigned long, void *, int);
-static int zfcp_qdio_sbals_from_segment
-	(struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
-
-static qdio_handler_t zfcp_qdio_request_handler;
-static qdio_handler_t zfcp_qdio_response_handler;
-static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
-	unsigned int, unsigned int, unsigned int, int, int);
-
-#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
-
-/*
- * Frees BUFFER memory for each of the pointers of the struct qdio_buffer
- * array in the adapter struct; sbuf is the pointer array.
- *
- * locks:       must only be called with zfcp_data.config_sema taken
- */
-static void
-zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf)
-{
-	int pos;
-
-	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE)
-		free_page((unsigned long) sbuf[pos]);
-}
+/* FIXME(tune): free space should be one max. SBAL chain plus what? */
+#define ZFCP_QDIO_PCI_INTERVAL	(QDIO_MAX_BUFFERS_PER_Q \
+				 - (FSF_MAX_SBALS_PER_REQ + 4))
+#define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
 
-/*
- * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
- * array in the adapter struct; cur_buf is the pointer array.
- *
- * returns:     zero on success else -ENOMEM
- * locks:       must only be called with zfcp_data.config_sema taken
- */
-static int
-zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf)
+static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
 {
 	int pos;
 
 	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
-		sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
-		if (!sbuf[pos]) {
-			zfcp_qdio_buffers_dequeue(sbuf);
+		sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
+		if (!sbal[pos])
 			return -ENOMEM;
-		}
 	}
 	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
 		if (pos % QBUFF_PER_PAGE)
-			sbuf[pos] = sbuf[pos - 1] + 1;
+			sbal[pos] = sbal[pos - 1] + 1;
 	return 0;
 }
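
zfcp_qdio_buffers_enqueue packs several fixed-size qdio_buffer structures into each page: only every QBUFF_PER_PAGE-th slot gets a fresh page, and the slots in between point into the same page at successive offsets. A user-space sketch of the same packing (sizes are illustrative):

    #include <stdlib.h>

    #define PAGE_SIZE	4096
    #define NBUF	128
    struct buf { unsigned char data[1024]; };	/* 4 bufs fit per page */
    #define BUF_PER_PAGE	(PAGE_SIZE / sizeof(struct buf))

    int alloc_packed(struct buf *tbl[NBUF])
    {
    	int pos;

    	/* one real allocation per page ... */
    	for (pos = 0; pos < NBUF; pos += BUF_PER_PAGE) {
    		tbl[pos] = calloc(1, PAGE_SIZE);
    		if (!tbl[pos])
    			return -1;
    	}
    	/* ... and the remaining slots point into that page */
    	for (pos = 0; pos < NBUF; pos++)
    		if (pos % BUF_PER_PAGE)
    			tbl[pos] = tbl[pos - 1] + 1; /* pointer arithmetic */
    	return 0;
    }
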
 
-/* locks: must only be called with zfcp_data.config_sema taken */
-int
-zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
+static volatile struct qdio_buffer_element *
+zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
 {
-	int ret;
-
-	ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer);
-	if (ret)
-		return ret;
-	return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer);
+	return &q->sbal[sbal_idx]->element[sbale_idx];
 }
 
-/* locks: must only be called with zfcp_data.config_sema taken */
-void
-zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
+/**
+ * zfcp_qdio_free - free memory used by request- and response queue
+ * @adapter: pointer to the zfcp_adapter structure
+ */
+void zfcp_qdio_free(struct zfcp_adapter *adapter)
 {
-	ZFCP_LOG_TRACE("freeing request_queue buffers\n");
-	zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer);
+	struct qdio_buffer **sbal_req, **sbal_resp;
+	int p;
 
-	ZFCP_LOG_TRACE("freeing response_queue buffers\n");
-	zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer);
-}
+	if (adapter->ccw_device)
+		qdio_free(adapter->ccw_device);
 
-int
-zfcp_qdio_allocate(struct zfcp_adapter *adapter)
-{
-	struct qdio_initialize *init_data;
+	sbal_req = adapter->req_q.sbal;
+	sbal_resp = adapter->resp_q.sbal;
 
-	init_data = &adapter->qdio_init_data;
+	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
+		free_page((unsigned long) sbal_req[p]);
+		free_page((unsigned long) sbal_resp[p]);
+	}
+}
 
-	init_data->cdev = adapter->ccw_device;
-	init_data->q_format = QDIO_SCSI_QFMT;
-	memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
-	ASCEBC(init_data->adapter_name, 8);
-	init_data->qib_param_field_format = 0;
-	init_data->qib_param_field = NULL;
-	init_data->input_slib_elements = NULL;
-	init_data->output_slib_elements = NULL;
-	init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD;
-	init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD;
-	init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD;
-	init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD;
-	init_data->no_input_qs = 1;
-	init_data->no_output_qs = 1;
-	init_data->input_handler = zfcp_qdio_response_handler;
-	init_data->output_handler = zfcp_qdio_request_handler;
-	init_data->int_parm = (unsigned long) adapter;
-	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
-		QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
-	init_data->input_sbal_addr_array =
-		(void **) (adapter->response_queue.buffer);
-	init_data->output_sbal_addr_array =
-		(void **) (adapter->request_queue.buffer);
+static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
+{
+	dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n");
 
-	return qdio_allocate(init_data);
+	zfcp_erp_adapter_reopen(adapter,
+				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
 }
 
-/*
- * function:    zfcp_qdio_handler_error_check
- *
- * purpose:     called by the response handler to determine error condition
- *
- * returns:     error flag
- */
-static int
-zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
-			      unsigned int qdio_error, unsigned int siga_error,
-			      int first_element, int elements_processed)
+static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
 {
-	int retval = 0;
+	int i, sbal_idx;
 
-	if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
-		retval = -EIO;
-
-		ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, "
-			      "qdio_error=0x%x, siga_error=0x%x)\n",
-			      status, qdio_error, siga_error);
-
-		zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
-					first_element, elements_processed);
-		/*
-		 * Restarting IO on the failed adapter from scratch.
-		 * Since we have been using this adapter, it is safe to assume
-		 * that it is not failed but recoverable. The card seems to
-		 * report link-up events by self-initiated queue shutdown.
-		 * That is why we need to clear the link-down flag
-		 * which is set again in case we have missed by a mile.
-		 */
-		zfcp_erp_adapter_reopen(adapter,
-					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-					ZFCP_STATUS_COMMON_ERP_FAILED, 140,
-					NULL);
+	for (i = first; i < first + cnt; i++) {
+		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
+		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
 	}
-	return retval;
 }
 
-/*
- * function:    zfcp_qdio_request_handler
- *
- * purpose:     is called by QDIO layer for completed SBALs in request queue
- *
- * returns:     (void)
- */
-static void
-zfcp_qdio_request_handler(struct ccw_device *ccw_device,
-			  unsigned int status,
-			  unsigned int qdio_error,
-			  unsigned int siga_error,
-			  unsigned int queue_number,
-			  int first_element,
-			  int elements_processed,
-			  unsigned long int_parm)
+static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
+			      int queue_no, int first, int count,
+			      unsigned long parm)
 {
-	struct zfcp_adapter *adapter;
-	struct zfcp_qdio_queue *queue;
-
-	adapter = (struct zfcp_adapter *) int_parm;
-	queue = &adapter->request_queue;
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
+	struct zfcp_qdio_queue *queue = &adapter->req_q;
 
-	ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n",
-		       zfcp_get_busid_by_adapter(adapter),
-		       first_element, elements_processed);
-
-	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
-						   siga_error, first_element,
-						   elements_processed)))
-		goto out;
-	/*
-	 * we stored address of struct zfcp_adapter data structure
-	 * associated with irq in int_parm
-	 */
+	if (unlikely(qdio_err)) {
+		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
+		zfcp_qdio_handler_error(adapter, 140);
+		return;
+	}
 
 	/* cleanup all SBALs being program-owned now */
-	zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed);
+	zfcp_qdio_zero_sbals(queue->sbal, first, count);
 
-	/* increase free space in outbound queue */
-	atomic_add(elements_processed, &queue->free_count);
-	ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count));
+	atomic_add(count, &queue->count);
 	wake_up(&adapter->request_wq);
-	ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n",
-		       elements_processed, atomic_read(&queue->free_count));
- out:
-	return;
 }
 
-/**
- * zfcp_qdio_reqid_check - checks for valid reqids.
- */
 static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
-				  unsigned long req_id)
+				  unsigned long req_id, int sbal_idx)
 {
 	struct zfcp_fsf_req *fsf_req;
 	unsigned long flags;
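
The request queue works as a credit counter: the submit path consumes SBAL credits, and the completion interrupt above adds them back and wakes blocked submitters. A compact sketch of that producer/consumer accounting (kernel APIs, illustrative structure; the driver serializes the take side under its queue lock, which this fragment omits):

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct out_queue {
    	atomic_t free_count;		/* SBAL credits left */
    	wait_queue_head_t wq;		/* submitters wait here */
    };

    /* submit path: take n credits or report that the queue is full */
    static int queue_take(struct out_queue *q, int n)
    {
    	if (atomic_read(&q->free_count) < n)
    		return -EBUSY;
    	atomic_sub(n, &q->free_count);
    	return 0;
    }

    /* completion IRQ: hardware is done with n SBALs, return the credits */
    static void queue_complete(struct out_queue *q, int n)
    {
    	atomic_add(n, &q->free_count);
    	wake_up(&q->wq);		/* let blocked submitters retry */
    }
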
@@ -248,203 +108,114 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
 	 * Unknown request means that we have potentially memory
 	 * corruption and must stop the machine immediately.
 	 */
-	panic("error: unknown request id (%ld) on adapter %s.\n",
+	panic("error: unknown request id (%lx) on adapter %s.\n",
 	      req_id, zfcp_get_busid_by_adapter(adapter));
 
 	zfcp_reqlist_remove(adapter, fsf_req);
-	atomic_dec(&adapter->reqs_active);
 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
 
-	/* finish the FSF request */
+	fsf_req->sbal_response = sbal_idx;
 	zfcp_fsf_req_complete(fsf_req);
 }
 
-/*
- * function:    zfcp_qdio_response_handler
- *
- * purpose:     is called by QDIO layer for completed SBALs in response queue
- *
- * returns:     (void)
- */
-static void
-zfcp_qdio_response_handler(struct ccw_device *ccw_device,
-			   unsigned int status,
-			   unsigned int qdio_error,
-			   unsigned int siga_error,
-			   unsigned int queue_number,
-			   int first_element,
-			   int elements_processed,
-			   unsigned long int_parm)
+static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
 {
-	struct zfcp_adapter *adapter;
-	struct zfcp_qdio_queue *queue;
-	int buffer_index;
-	int i;
-	struct qdio_buffer *buffer;
-	int retval = 0;
-	u8 count;
-	u8 start;
-	volatile struct qdio_buffer_element *buffere = NULL;
-	int buffere_index;
+	struct zfcp_qdio_queue *queue = &adapter->resp_q;
+	struct ccw_device *cdev = adapter->ccw_device;
+	u8 count, start = queue->first;
+	unsigned int retval;
 
-	adapter = (struct zfcp_adapter *) int_parm;
-	queue = &adapter->response_queue;
+	count = atomic_read(&queue->count) + processed;
+
+	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
+
+	if (unlikely(retval)) {
+		atomic_set(&queue->count, count);
+		/* FIXME: Recover this with an adapter reopen? */
+	} else {
+		queue->first += count;
+		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
+		atomic_set(&queue->count, 0);
+	}
+}
 
-	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
-						   siga_error, first_element,
-						   elements_processed)))
-		goto out;
+static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
+			       int queue_no, int first, int count,
+			       unsigned long parm)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
+	struct zfcp_qdio_queue *queue = &adapter->resp_q;
+	volatile struct qdio_buffer_element *sbale;
+	int sbal_idx, sbale_idx, sbal_no;
+
+	if (unlikely(qdio_err)) {
+		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
+		zfcp_qdio_handler_error(adapter, 147);
+		return;
+	}
 
-	/*
-	 * we stored address of struct zfcp_adapter data structure
-	 * associated with irq in int_parm
-	 */
-
-	buffere = &(queue->buffer[first_element]->element[0]);
-	ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags);
 	/*
 	 * go through all SBALs from input queue currently
 	 * returned by QDIO layer
 	 */
-	for (i = 0; i < elements_processed; i++) {
-
-		buffer_index = first_element + i;
-		buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
-		buffer = queue->buffer[buffer_index];
+	for (sbal_no = 0; sbal_no < count; sbal_no++) {
+		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
 
 		/* go through all SBALEs of SBAL */
-		for (buffere_index = 0;
-		     buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER;
-		     buffere_index++) {
-
-			/* look for QDIO request identifiers in SB */
-			buffere = &buffer->element[buffere_index];
+		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
+		     sbale_idx++) {
+			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
 			zfcp_qdio_reqid_check(adapter,
-					      (unsigned long) buffere->addr);
-
-			/*
-			 * A single used SBALE per inbound SBALE has been
-			 * implemented by QDIO so far. Hope they will
-			 * do some optimisation. Will need to change to
-			 * unlikely() then.
-			 */
-			if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY))
+					      (unsigned long) sbale->addr,
+					      sbal_idx);
+			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
 				break;
 		};
 
-		if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) {
-			ZFCP_LOG_NORMAL("bug: End of inbound data "
-					"not marked!\n");
-		}
+		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
+			dev_warn(&adapter->ccw_device->dev,
+				 "Protocol violation by adapter. "
+				 "Continuing operations.\n");
 	}
 
 	/*
 	 * put range of SBALs back to response queue
 	 * (including SBALs which have already been free before)
 	 */
-	count = atomic_read(&queue->free_count) + elements_processed;
-	start = queue->free_index;
-
-	ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
-		       "queue_no=%i, index_in_queue=%i, count=%i, "
-		       "buffers=0x%lx\n",
-		       zfcp_get_busid_by_adapter(adapter),
-		       QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
-		       0, start, count, (unsigned long) &queue->buffer[start]);
-
-	retval = do_QDIO(ccw_device,
-			 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
-			 0, start, count, NULL);
-
-	if (unlikely(retval)) {
-		atomic_set(&queue->free_count, count);
-		ZFCP_LOG_DEBUG("clearing of inbound data regions failed, "
-			       "queues may be down "
-			       "(count=%d, start=%d, retval=%d)\n",
-			       count, start, retval);
-	} else {
-		queue->free_index += count;
-		queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
-		atomic_set(&queue->free_count, 0);
-		ZFCP_LOG_TRACE("%i buffers enqueued to response "
-			       "queue at position %i\n", count, start);
-	}
- out:
-	return;
+	zfcp_qdio_resp_put_back(adapter, count);
 }
 
 /**
- * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
- * @queue: queue from which SBALE should be returned
- * @sbal: specifies number of SBAL in queue
- * @sbale: specifies number of SBALE in SBAL
- */
-static inline volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
-{
-	return &queue->buffer[sbal]->element[sbale];
-}
-
-/**
- * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
- *	a struct zfcp_fsf_req
+ * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
+ * @fsf_req: pointer to struct fsf_req
+ * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
 volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
+zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
 {
-	return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
-				   sbal, sbale);
+	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
 }
 
 /**
- * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for
- *	a struct zfcp_fsf_req
- */
-static inline volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
-{
-	return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue,
-				   sbal, sbale);
-}
-
-/**
- * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
- *	a struct zfcp_fsf_req
+ * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
+ * @fsf_req: pointer to struct fsf_req
+ * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
 volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
+zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
 {
-	return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
-				   fsf_req->sbale_curr);
+	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
+			       req->sbale_curr);
 }
 
-/**
- * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used
- *	on the request_queue for a struct zfcp_fsf_req
- * @fsf_req: the number of the last SBAL that can be used is stored herein
- * @max_sbals: used to pass an upper limit for the number of SBALs
- *
- * Note: We can assume at least one free SBAL in the request_queue when called.
- */
-static void
-zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
+static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
 {
-	int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
+	int count = atomic_read(&fsf_req->adapter->req_q.count);
 	count = min(count, max_sbals);
-	fsf_req->sbal_last = fsf_req->sbal_first;
-	fsf_req->sbal_last += (count - 1);
-	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
+	fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
+		% QDIO_MAX_BUFFERS_PER_Q;
 }
@@ -455,16 +226,16 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	/* don't exceed last allowed SBAL */
-	if (fsf_req->sbal_curr == fsf_req->sbal_last)
+	if (fsf_req->sbal_last == fsf_req->sbal_limit)
 		return NULL;
 
 	/* set chaining flag in first SBALE of current SBAL */
-	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
+	sbale = zfcp_qdio_sbale_req(fsf_req);
 	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
 
 	/* calculate index of next SBAL */
-	fsf_req->sbal_curr++;
-	fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q;
+	fsf_req->sbal_last++;
+	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
 
 	/* keep this request's number of SBALs up-to-date */
 	fsf_req->sbal_number++;
@@ -479,214 +250,246 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 	return sbale;
 }
 
-/**
- * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
- */
 static volatile struct qdio_buffer_element *
 zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
 {
 	if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
 		return zfcp_qdio_sbal_chain(fsf_req, sbtype);
-
 	fsf_req->sbale_curr++;
-
 	return zfcp_qdio_sbale_curr(fsf_req);
 }
 
-/**
- * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
- *	with zeros
- */
-static int
-zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
-{
-	struct qdio_buffer **buf = queue->buffer;
-	int curr = first;
-	int count = 0;
-
-	for (;;) {
-		curr %= QDIO_MAX_BUFFERS_PER_Q;
-		count++;
-		memset(buf[curr], 0, sizeof(struct qdio_buffer));
-		if (curr == last)
-			break;
-		curr++;
-	}
-	return count;
-}
-
-/**
- * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req
- */
-static inline int
-zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
+static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
 {
-	return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue,
-				    fsf_req->sbal_first, fsf_req->sbal_curr);
+	struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
+	int first = fsf_req->sbal_first;
+	int last = fsf_req->sbal_last;
+	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
+		QDIO_MAX_BUFFERS_PER_Q + 1;
+	zfcp_qdio_zero_sbals(sbal, first, count);
 }
528 271
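[Editor's note] zfcp_qdio_undo_sbals above derives the number of SBALs to wipe from the first/last ring indices alone. A minimal user-space sketch (not driver code) of that count formula: first == last wipes exactly one SBAL, and a wrapped range is handled by the added QDIO_MAX_BUFFERS_PER_Q term. A ring size of 128 is assumed.

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

static int sbal_span(int first, int last)
{
	return (last - first + QDIO_MAX_BUFFERS_PER_Q) %
	       QDIO_MAX_BUFFERS_PER_Q + 1;
}

int main(void)
{
	printf("%d\n", sbal_span(5, 5));	/* 1: single SBAL */
	printf("%d\n", sbal_span(10, 13));	/* 4: plain range */
	printf("%d\n", sbal_span(126, 1));	/* 4: wraps past index 127 */
	return 0;
}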
529 272static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
530/** 273 unsigned int sbtype, void *start_addr,
531 * zfcp_qdio_sbale_fill - set address and length in current SBALE 274 unsigned int total_length)
532 * on request_queue
533 */
534static void
535zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
536 void *addr, int length)
537{ 275{
538 volatile struct qdio_buffer_element *sbale; 276 volatile struct qdio_buffer_element *sbale;
539
540 sbale = zfcp_qdio_sbale_curr(fsf_req);
541 sbale->addr = addr;
542 sbale->length = length;
543}
544
545/**
546 * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s)
547 * @fsf_req: request to be processed
548 * @sbtype: SBALE flags
549 * @start_addr: address of memory segment
550 * @total_length: length of memory segment
551 *
552 * Alignment and length of the segment determine how many SBALEs are needed
553 * for the memory segment.
554 */
555static int
556zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
557 void *start_addr, unsigned long total_length)
558{
559 unsigned long remaining, length; 277 unsigned long remaining, length;
560 void *addr; 278 void *addr;
561 279
562 /* split segment up heeding page boundaries */ 280 /* split segment up */
563 for (addr = start_addr, remaining = total_length; remaining > 0; 281 for (addr = start_addr, remaining = total_length; remaining > 0;
564 addr += length, remaining -= length) { 282 addr += length, remaining -= length) {
565 /* get next free SBALE for new piece */ 283 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
566 if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) { 284 if (!sbale) {
567 /* no SBALE left, clean up and leave */ 285 zfcp_qdio_undo_sbals(fsf_req);
568 zfcp_qdio_sbals_wipe(fsf_req);
569 return -EINVAL; 286 return -EINVAL;
570 } 287 }
571 /* calculate length of new piece */ 288
289 /* new piece must not exceed next page boundary */
572 length = min(remaining, 290 length = min(remaining,
573 (PAGE_SIZE - ((unsigned long) addr & 291 (PAGE_SIZE - ((unsigned long)addr &
574 (PAGE_SIZE - 1)))); 292 (PAGE_SIZE - 1))));
575 /* fill current SBALE with calculated piece */ 293 sbale->addr = addr;
576 zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length); 294 sbale->length = length;
577 } 295 }
578 return total_length; 296 return 0;
579} 297}
580 298
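[Editor's note] A minimal user-space sketch (not driver code) of how the loop in zfcp_qdio_fill_sbals cuts a memory segment at page boundaries, so that each SBALE describes at most one page. The start address and length are made-up values; a PAGE_SIZE of 4096 is assumed.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long addr = 0x12f00;	/* hypothetical segment start */
	unsigned long remaining = 10000;/* hypothetical segment length */
	unsigned long length;

	for (; remaining > 0; addr += length, remaining -= length) {
		/* piece must not cross the next page boundary */
		length = PAGE_SIZE - (addr & (PAGE_SIZE - 1));
		if (length > remaining)
			length = remaining;
		printf("SBALE: addr 0x%lx, length %lu\n", addr, length);
	}
	return 0;
}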
581
582/** 299/**
583 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list 300 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
584 * @fsf_req: request to be processed 301 * @fsf_req: request to be processed
585 * @sbtype: SBALE flags 302 * @sbtype: SBALE flags
586 * @sg: scatter-gather list 303 * @sg: scatter-gather list
587 * @sg_count: number of elements in scatter-gather list
588 * @max_sbals: upper bound for number of SBALs to be used 304 * @max_sbals: upper bound for number of SBALs to be used
305 * Returns: number of bytes, or error (negative)
589 */ 306 */
590int 307int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
591zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 308 struct scatterlist *sg, int max_sbals)
592 struct scatterlist *sgl, int sg_count, int max_sbals)
593{ 309{
594 int sg_index;
595 struct scatterlist *sg_segment;
596 int retval;
597 volatile struct qdio_buffer_element *sbale; 310 volatile struct qdio_buffer_element *sbale;
598 int bytes = 0; 311 int retval, bytes = 0;
599 312
600 /* figure out last allowed SBAL */ 313 /* figure out last allowed SBAL */
601 zfcp_qdio_sbal_limit(fsf_req, max_sbals); 314 zfcp_qdio_sbal_limit(fsf_req, max_sbals);
602 315
603 /* set storage-block type for current SBAL */ 316 /* set storage-block type for this request */
604 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 317 sbale = zfcp_qdio_sbale_req(fsf_req);
605 sbale->flags |= sbtype; 318 sbale->flags |= sbtype;
606 319
607 /* process all segments of scatter-gather list */ 320 for (; sg; sg = sg_next(sg)) {
608 for_each_sg(sgl, sg_segment, sg_count, sg_index) { 321 retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
609 retval = zfcp_qdio_sbals_from_segment( 322 sg->length);
610 fsf_req, 323 if (retval < 0)
611 sbtype, 324 return retval;
612 zfcp_sg_to_address(sg_segment), 325 bytes += sg->length;
613 sg_segment->length);
614 if (retval < 0) {
615 bytes = retval;
616 goto out;
617 } else
618 bytes += retval;
619 } 326 }
327
620 /* assume that no other SBALEs are to follow in the same SBAL */ 328 /* assume that no other SBALEs are to follow in the same SBAL */
621 sbale = zfcp_qdio_sbale_curr(fsf_req); 329 sbale = zfcp_qdio_sbale_curr(fsf_req);
622 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 330 sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
623out: 331
624 return bytes; 332 return bytes;
625} 333}
626 334
627
628/** 335/**
629 * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command 336 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
630 * @fsf_req: request to be processed 337 * @fsf_req: pointer to struct zfcp_fsf_req
631 * @sbtype: SBALE flags 338 * Returns: 0 on success, error otherwise
632 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
633 * to fill SBALs
634 */ 339 */
635int 340int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
636zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
637 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
638{ 341{
639 return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, scsi_sglist(scsi_cmnd), 342 struct zfcp_adapter *adapter = fsf_req->adapter;
640 scsi_sg_count(scsi_cmnd), 343 struct zfcp_qdio_queue *req_q = &adapter->req_q;
641 ZFCP_MAX_SBALS_PER_REQ); 344 int first = fsf_req->sbal_first;
345 int count = fsf_req->sbal_number;
346 int retval, pci, pci_batch;
347 volatile struct qdio_buffer_element *sbale;
348
349 /* acknowledgements for transferred buffers */
350 pci_batch = req_q->pci_batch + count;
351 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
352 pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
353 pci = first + count - (pci_batch + 1);
354 pci %= QDIO_MAX_BUFFERS_PER_Q;
355 sbale = zfcp_qdio_sbale(req_q, pci, 0);
356 sbale->flags |= SBAL_FLAGS0_PCI;
357 }
358
359 retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
360 count);
361 if (unlikely(retval)) {
362 zfcp_qdio_zero_sbals(req_q->sbal, first, count);
363 return retval;
364 }
365
366 /* account for transferred buffers */
367 atomic_sub(count, &req_q->count);
368 req_q->first += count;
369 req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
370 req_q->pci_batch = pci_batch;
371 return 0;
642} 372}
643 373
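[Editor's note] A minimal user-space sketch (not driver code) of the interrupt batching in zfcp_qdio_send above: after roughly every ZFCP_QDIO_PCI_INTERVAL transferred buffers, one SBAL in the batch is tagged with the PCI flag, and the sketch picks its ring index. The interval of 4 is made up purely for illustration; the real constant's value is not shown in this hunk.

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128
#define PCI_INTERVAL 4	/* stand-in for ZFCP_QDIO_PCI_INTERVAL */

int main(void)
{
	int pci_batch = 3;	/* buffers sent since the last PCI flag */
	int first = 126;	/* ring index of this request's first SBAL */
	int count = 3;		/* SBALs in this request */
	int pci;

	pci_batch += count;
	if (pci_batch >= PCI_INTERVAL) {
		pci_batch %= PCI_INTERVAL;
		pci = (first + count - (pci_batch + 1)) %
		      QDIO_MAX_BUFFERS_PER_Q;
		printf("set SBAL_FLAGS0_PCI on ring index %d\n", pci);
	}
	printf("carry pci_batch = %d into the next send\n", pci_batch);
	return 0;
}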
644/** 374/**
645 * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed 375 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
376 * @adapter: pointer to struct zfcp_adapter
377 * Returns: -ENOMEM on memory allocation error or return value from
378 * qdio_allocate
646 */ 379 */
647int 380int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
648zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue,
649 struct zfcp_fsf_req *fsf_req)
650{ 381{
651 int new_distance_from_int; 382 struct qdio_initialize *init_data;
652 int pci_pos;
653 volatile struct qdio_buffer_element *sbale;
654 383
655 new_distance_from_int = req_queue->distance_from_int + 384 if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
656 fsf_req->sbal_number; 385 zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
657 386 return -ENOMEM;
658 if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) { 387
659 new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL; 388 init_data = &adapter->qdio_init_data;
660 pci_pos = fsf_req->sbal_first; 389
661 pci_pos += fsf_req->sbal_number; 390 init_data->cdev = adapter->ccw_device;
662 pci_pos -= new_distance_from_int; 391 init_data->q_format = QDIO_ZFCP_QFMT;
663 pci_pos -= 1; 392 memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
664 pci_pos %= QDIO_MAX_BUFFERS_PER_Q; 393 ASCEBC(init_data->adapter_name, 8);
665 sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0); 394 init_data->qib_param_field_format = 0;
666 sbale->flags |= SBAL_FLAGS0_PCI; 395 init_data->qib_param_field = NULL;
667 } 396 init_data->input_slib_elements = NULL;
668 return new_distance_from_int; 397 init_data->output_slib_elements = NULL;
398 init_data->no_input_qs = 1;
399 init_data->no_output_qs = 1;
400 init_data->input_handler = zfcp_qdio_int_resp;
401 init_data->output_handler = zfcp_qdio_int_req;
402 init_data->int_parm = (unsigned long) adapter;
403 init_data->flags = QDIO_INBOUND_0COPY_SBALS |
404 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
405 init_data->input_sbal_addr_array =
406 (void **) (adapter->resp_q.sbal);
407 init_data->output_sbal_addr_array =
408 (void **) (adapter->req_q.sbal);
409
410 return qdio_allocate(init_data);
669} 411}
670 412
671/* 413/**
672 * function: zfcp_zero_sbals 414 * zfcp_qdio_close - close qdio queues for an adapter
673 *
674 * purpose: zeros specified range of SBALs
675 *
676 * returns:
677 */ 415 */
678void 416void zfcp_qdio_close(struct zfcp_adapter *adapter)
679zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count)
680{ 417{
681 int cur_pos; 418 struct zfcp_qdio_queue *req_q;
682 int index; 419 int first, count;
683 420
684 for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) { 421 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
685 index = cur_pos % QDIO_MAX_BUFFERS_PER_Q; 422 return;
686 memset(buf[index], 0, sizeof (struct qdio_buffer)); 423
687 ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n", 424 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
688 index, buf[index]); 425 req_q = &adapter->req_q;
426 spin_lock(&req_q->lock);
427 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
428 spin_unlock(&req_q->lock);
429
430 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
431
432 /* cleanup used outbound sbals */
433 count = atomic_read(&req_q->count);
434 if (count < QDIO_MAX_BUFFERS_PER_Q) {
435 first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
436 count = QDIO_MAX_BUFFERS_PER_Q - count;
437 zfcp_qdio_zero_sbals(req_q->sbal, first, count);
689 } 438 }
439 req_q->first = 0;
440 atomic_set(&req_q->count, 0);
441 req_q->pci_batch = 0;
442 adapter->resp_q.first = 0;
443 atomic_set(&adapter->resp_q.count, 0);
690} 444}
691 445
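[Editor's note] A minimal user-space sketch (not driver code) of the cleanup range computed in zfcp_qdio_close above: 'count' free buffers start at req_q->first, so the remaining in-flight buffers begin right after that free span and wrap around the 128-entry ring.

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

int main(void)
{
	int q_first = 100;	/* index of first free outbound SBAL */
	int free_count = 120;	/* free SBALs at shutdown */
	int first, count;

	if (free_count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (q_first + free_count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - free_count;
		/* here: zero SBALs 92..99, the 8 still in flight */
		printf("zero %d SBALs starting at ring index %d\n",
		       count, first);
	}
	return 0;
}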
692#undef ZFCP_LOG_AREA 446/**
447 * zfcp_qdio_open - prepare and initialize response queue
448 * @adapter: pointer to struct zfcp_adapter
449 * Returns: 0 on success, otherwise -EIO
450 */
451int zfcp_qdio_open(struct zfcp_adapter *adapter)
452{
453 volatile struct qdio_buffer_element *sbale;
454 int cc;
455
456 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
457 return -EIO;
458
459 if (qdio_establish(&adapter->qdio_init_data)) {
460 dev_err(&adapter->ccw_device->dev,
461 "Establish of QDIO queues failed.\n");
462 return -EIO;
463 }
464
465 if (qdio_activate(adapter->ccw_device)) {
466 dev_err(&adapter->ccw_device->dev,
467 "Activate of QDIO queues failed.\n");
468 goto failed_qdio;
469 }
470
471 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
472 sbale = &(adapter->resp_q.sbal[cc]->element[0]);
473 sbale->length = 0;
474 sbale->flags = SBAL_FLAGS_LAST_ENTRY;
475 sbale->addr = NULL;
476 }
477
478 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
479 QDIO_MAX_BUFFERS_PER_Q)) {
480 dev_err(&adapter->ccw_device->dev,
481 "Init of QDIO response queue failed.\n");
482 goto failed_qdio;
483 }
484
485 /* set index of first available SBAL / number of available SBALs */
486 adapter->req_q.first = 0;
487 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
488 adapter->req_q.pci_batch = 0;
489
490 return 0;
491
492failed_qdio:
493 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
494 return -EIO;
495}
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 01687559dc06..aeae56b00b45 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -1,220 +1,65 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Interface to Linux SCSI midlayer.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
23
24#include "zfcp_ext.h" 9#include "zfcp_ext.h"
25#include <asm/atomic.h> 10#include <asm/atomic.h>
26 11
27static void zfcp_scsi_slave_destroy(struct scsi_device *sdp);
28static int zfcp_scsi_slave_alloc(struct scsi_device *sdp);
29static int zfcp_scsi_slave_configure(struct scsi_device *sdp);
30static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
31 void (*done) (struct scsi_cmnd *));
32static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
33static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
34static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *);
35static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
36static int zfcp_task_management_function(struct zfcp_unit *, u8,
37 struct scsi_cmnd *);
38
39static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
40 unsigned int, unsigned int);
41
42static struct device_attribute *zfcp_sysfs_sdev_attrs[];
43static struct device_attribute *zfcp_a_stats_attrs[];
44
45struct zfcp_data zfcp_data = {
46 .scsi_host_template = {
47 .name = ZFCP_NAME,
48 .module = THIS_MODULE,
49 .proc_name = "zfcp",
50 .slave_alloc = zfcp_scsi_slave_alloc,
51 .slave_configure = zfcp_scsi_slave_configure,
52 .slave_destroy = zfcp_scsi_slave_destroy,
53 .queuecommand = zfcp_scsi_queuecommand,
54 .eh_abort_handler = zfcp_scsi_eh_abort_handler,
55 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
56 .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
57 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
58 .can_queue = 4096,
59 .this_id = -1,
60 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
61 .cmd_per_lun = 1,
62 .use_clustering = 1,
63 .sdev_attrs = zfcp_sysfs_sdev_attrs,
64 .max_sectors = ZFCP_MAX_SECTORS,
65 .shost_attrs = zfcp_a_stats_attrs,
66 },
67 .driver_version = ZFCP_VERSION,
68};
69
70/* Find start of Response Information in FCP response unit*/
71char *
72zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
73{
74 char *fcp_rsp_info_ptr;
75
76 fcp_rsp_info_ptr =
77 (unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
78
79 return fcp_rsp_info_ptr;
80}
81
82/* Find start of Sense Information in FCP response unit*/ 12/* Find start of Sense Information in FCP response unit*/
83char * 13char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
84zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
85{ 14{
86 char *fcp_sns_info_ptr; 15 char *fcp_sns_info_ptr;
87 16
88 fcp_sns_info_ptr = 17 fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1];
89 (unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
90 if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid) 18 if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
91 fcp_sns_info_ptr = (char *) fcp_sns_info_ptr + 19 fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len;
92 fcp_rsp_iu->fcp_rsp_len;
93 20
94 return fcp_sns_info_ptr; 21 return fcp_sns_info_ptr;
95} 22}
96 23
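[Editor's note] The &fcp_rsp_iu[1] idiom introduced above is identical to the removed sizeof arithmetic: both yield the address just past the fixed part of the IU. A minimal user-space sketch (not driver code), with a made-up stand-in struct in place of the real struct fcp_rsp_iu:

#include <assert.h>
#include <stdio.h>

struct rsp_iu {			/* stand-in for struct fcp_rsp_iu */
	unsigned int header[3];
};

int main(void)
{
	struct rsp_iu iu, *p = &iu;
	char *old_style = (char *) p + sizeof(struct rsp_iu);
	char *new_style = (char *) &p[1];

	assert(old_style == new_style);
	printf("both point %zu bytes past the IU\n",
	       sizeof(struct rsp_iu));
	return 0;
}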
97static fcp_dl_t * 24void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
98zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
99{ 25{
100 int additional_length = fcp_cmd->add_fcp_cdb_length << 2; 26 fcp_dl_t *fcp_dl_ptr;
101 fcp_dl_t *fcp_dl_addr;
102 27
103 fcp_dl_addr = (fcp_dl_t *)
104 ((unsigned char *) fcp_cmd +
105 sizeof (struct fcp_cmnd_iu) + additional_length);
106 /* 28 /*
107 * fcp_dl_addr = start address of fcp_cmnd structure + 29 * fcp_dl_addr = start address of fcp_cmnd structure +
108 * size of fixed part + size of dynamically sized add_dcp_cdb field 30 * size of fixed part + size of dynamically sized add_dcp_cdb field
109 * SEE FCP-2 documentation 31 * SEE FCP-2 documentation
110 */ 32 */
111 return fcp_dl_addr; 33 fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] +
34 (fcp_cmd->add_fcp_cdb_length << 2));
35 *fcp_dl_ptr = fcp_dl;
112} 36}
113 37
114fcp_dl_t
115zfcp_get_fcp_dl(struct fcp_cmnd_iu * fcp_cmd)
116{
117 return *zfcp_get_fcp_dl_ptr(fcp_cmd);
118}
119
120void
121zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
122{
123 *zfcp_get_fcp_dl_ptr(fcp_cmd) = fcp_dl;
124}
125
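[Editor's note] A minimal user-space sketch (not driver code) of where zfcp_set_fcp_dl writes the data-length field: after the fixed part of the command IU plus the variable additional-CDB area. add_fcp_cdb_length counts 4-byte words, hence the << 2. The stand-in struct and its sizes are made up; the real layout lives in struct fcp_cmnd_iu.

#include <stdio.h>

struct cmnd_iu {			/* stand-in for struct fcp_cmnd_iu */
	unsigned char fixed_part[31];
	unsigned char add_fcp_cdb_length;	/* in 4-byte words */
};

int main(void)
{
	struct cmnd_iu iu = { .add_fcp_cdb_length = 4 };
	unsigned long offset = sizeof(struct cmnd_iu) +
			       (iu.add_fcp_cdb_length << 2);

	/* 32 fixed bytes + 16 bytes of additional CDB = offset 48 */
	printf("fcp_dl written %lu bytes into the IU\n", offset);
	return 0;
}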
126/*
127 * note: it's a bit-or operation not an assignment
128 * regarding the specified byte
129 */
130static inline void
131set_byte(int *result, char status, char pos)
132{
133 *result |= status << (pos * 8);
134}
135
136void
137set_host_byte(int *result, char status)
138{
139 set_byte(result, status, 2);
140}
141
142void
143set_driver_byte(int *result, char status)
144{
145 set_byte(result, status, 3);
146}
147
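[Editor's note] A minimal user-space sketch (not driver code) of the byte packing the helpers removed above performed: the SCSI result word carries the host byte in byte 2 and the driver byte in byte 3, so storing DID_ERROR (0x07 in the SCSI midlayer) as host byte shifts it left by 16 bits.

#include <stdio.h>

#define DID_ERROR 0x07

int main(void)
{
	int result = 0;

	result |= DID_ERROR << (2 * 8);		/* host byte, byte 2 */
	printf("result = 0x%08x\n", result);	/* prints 0x00070000 */
	return 0;
}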
148static int
149zfcp_scsi_slave_alloc(struct scsi_device *sdp)
150{
151 struct zfcp_adapter *adapter;
152 struct zfcp_unit *unit;
153 unsigned long flags;
154 int retval = -ENXIO;
155
156 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
157 if (!adapter)
158 goto out;
159
160 read_lock_irqsave(&zfcp_data.config_lock, flags);
161 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
162 if (unit && atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
163 &unit->status)) {
164 sdp->hostdata = unit;
165 unit->device = sdp;
166 zfcp_unit_get(unit);
167 retval = 0;
168 }
169 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
170 out:
171 return retval;
172}
173
174/**
175 * zfcp_scsi_slave_destroy - called when scsi device is removed
176 *
177 * Remove reference to associated scsi device for an zfcp_unit.
178 * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs
179 * or a scan for this device might have failed.
180 */
181static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 38static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
182{ 39{
183 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 40 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
184 41 WARN_ON(!unit);
185 if (unit) { 42 if (unit) {
186 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); 43 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
187 sdpnt->hostdata = NULL; 44 sdpnt->hostdata = NULL;
188 unit->device = NULL; 45 unit->device = NULL;
189 zfcp_erp_unit_failed(unit, 12, NULL); 46 zfcp_erp_unit_failed(unit, 12, NULL);
190 zfcp_unit_put(unit); 47 zfcp_unit_put(unit);
191 } else 48 }
192 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
193 "address %p\n", sdpnt);
194} 49}
195 50
196/* 51static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
197 * called from scsi midlayer to allow finetuning of a device.
198 */
199static int
200zfcp_scsi_slave_configure(struct scsi_device *sdp)
201{ 52{
202 if (sdp->tagged_supported) 53 if (sdp->tagged_supported)
203 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, ZFCP_CMND_PER_LUN); 54 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32);
204 else 55 else
205 scsi_adjust_queue_depth(sdp, 0, 1); 56 scsi_adjust_queue_depth(sdp, 0, 1);
206 return 0; 57 return 0;
207} 58}
208 59
209/** 60static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
210 * zfcp_scsi_command_fail - set result in scsi_cmnd and call scsi_done function
211 * @scpnt: pointer to struct scsi_cmnd where result is set
212 * @result: result to be set in scpnt (e.g. DID_ERROR)
213 */
214static void
215zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
216{ 61{
217 set_host_byte(&scpnt->result, result); 62 set_host_byte(scpnt, result);
218 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 63 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
219 zfcp_scsi_dbf_event_result("fail", 4, 64 zfcp_scsi_dbf_event_result("fail", 4,
220 (struct zfcp_adapter*) scpnt->device->host->hostdata[0], 65 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
@@ -223,114 +68,13 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
223 scpnt->scsi_done(scpnt); 68 scpnt->scsi_done(scpnt);
224} 69}
225 70
226/** 71static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
227 * zfcp_scsi_command_async - worker for zfcp_scsi_queuecommand and 72 void (*done) (struct scsi_cmnd *))
228 * zfcp_scsi_command_sync
229 * @adapter: adapter where scsi command is issued
230 * @unit: unit to which scsi command is sent
231 * @scpnt: scsi command to be sent
232 * @timer: timer to be started if request is successfully initiated
233 *
234 * Note: scsi_done function must be set in scpnt.
235 */
236int
237zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
238 struct scsi_cmnd *scpnt, int use_timer)
239{
240 int tmp;
241 int retval;
242
243 retval = 0;
244
245 BUG_ON((adapter == NULL) || (adapter != unit->port->adapter));
246 BUG_ON(scpnt->scsi_done == NULL);
247
248 if (unlikely(NULL == unit)) {
249 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
250 goto out;
251 }
252
253 if (unlikely(
254 atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status) ||
255 !atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status))) {
256 ZFCP_LOG_DEBUG("stopping SCSI I/O on unit 0x%016Lx on port "
257 "0x%016Lx on adapter %s\n",
258 unit->fcp_lun, unit->port->wwpn,
259 zfcp_get_busid_by_adapter(adapter));
260 zfcp_scsi_command_fail(scpnt, DID_ERROR);
261 goto out;
262 }
263
264 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
265 ZFCP_REQ_AUTO_CLEANUP);
266 if (unlikely(tmp == -EBUSY)) {
267 ZFCP_LOG_DEBUG("adapter %s not ready or unit 0x%016Lx "
268 "on port 0x%016Lx in recovery\n",
269 zfcp_get_busid_by_unit(unit),
270 unit->fcp_lun, unit->port->wwpn);
271 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
272 goto out;
273 }
274
275 if (unlikely(tmp < 0)) {
276 ZFCP_LOG_DEBUG("error: initiation of Send FCP Cmnd failed\n");
277 retval = SCSI_MLQUEUE_HOST_BUSY;
278 }
279
280out:
281 return retval;
282}
283
284static void
285zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
286{
287 struct completion *wait = (struct completion *) scpnt->SCp.ptr;
288 complete(wait);
289}
290
291
292/**
293 * zfcp_scsi_command_sync - send a SCSI command and wait for completion
294 * @unit: unit where command is sent to
295 * @scpnt: scsi command to be sent
296 * @use_timer: indicates whether timer should be setup or not
297 * Return: 0
298 *
299 * Errors are indicated in scpnt->result
300 */
301int
302zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
303 int use_timer)
304{
305 int ret;
306 DECLARE_COMPLETION_ONSTACK(wait);
307
308 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */
309 scpnt->scsi_done = zfcp_scsi_command_sync_handler;
310 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt,
311 use_timer);
312 if (ret == 0)
313 wait_for_completion(&wait);
314
315 scpnt->SCp.ptr = NULL;
316
317 return 0;
318}
319
320/*
321 * function: zfcp_scsi_queuecommand
322 *
323 * purpose: enqueues a SCSI command to the specified target device
324 *
325 * returns: 0 - success, SCSI command enqueued
326 * !0 - failure
327 */
328static int
329zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
330 void (*done) (struct scsi_cmnd *))
331{ 73{
332 struct zfcp_unit *unit; 74 struct zfcp_unit *unit;
333 struct zfcp_adapter *adapter; 75 struct zfcp_adapter *adapter;
76 int status;
77 int ret;
334 78
335 /* reset the status for this request */ 79 /* reset the status for this request */
336 scpnt->result = 0; 80 scpnt->result = 0;
@@ -342,44 +86,76 @@ zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
342 * (stored there by zfcp_scsi_slave_alloc) 86 * (stored there by zfcp_scsi_slave_alloc)
343 */ 87 */
344 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 88 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
345 unit = (struct zfcp_unit *) scpnt->device->hostdata; 89 unit = scpnt->device->hostdata;
90
91 BUG_ON(!adapter || (adapter != unit->port->adapter));
92 BUG_ON(!scpnt->scsi_done);
346 93
347 return zfcp_scsi_command_async(adapter, unit, scpnt, 0); 94 if (unlikely(!unit)) {
95 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
96 return 0;
97 }
98
99 status = atomic_read(&unit->status);
100 if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
101 !(status & ZFCP_STATUS_COMMON_RUNNING))) {
102 zfcp_scsi_command_fail(scpnt, DID_ERROR);
103 return 0;
104 }
105
106 ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0,
107 ZFCP_REQ_AUTO_CLEANUP);
108 if (unlikely(ret == -EBUSY))
109 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
110 else if (unlikely(ret < 0))
111 return SCSI_MLQUEUE_HOST_BUSY;
112
113 return ret;
348} 114}
349 115
350static struct zfcp_unit * 116static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
351zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id, 117 int channel, unsigned int id,
352 unsigned int lun) 118 unsigned int lun)
353{ 119{
354 struct zfcp_port *port; 120 struct zfcp_port *port;
355 struct zfcp_unit *unit, *retval = NULL; 121 struct zfcp_unit *unit;
356 122
357 list_for_each_entry(port, &adapter->port_list_head, list) { 123 list_for_each_entry(port, &adapter->port_list_head, list) {
358 if (!port->rport || (id != port->rport->scsi_target_id)) 124 if (!port->rport || (id != port->rport->scsi_target_id))
359 continue; 125 continue;
360 list_for_each_entry(unit, &port->unit_list_head, list) 126 list_for_each_entry(unit, &port->unit_list_head, list)
361 if (lun == unit->scsi_lun) { 127 if (lun == unit->scsi_lun)
362 retval = unit; 128 return unit;
363 goto out;
364 }
365 } 129 }
366 out: 130
131 return NULL;
132}
133
134static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
135{
136 struct zfcp_adapter *adapter;
137 struct zfcp_unit *unit;
138 unsigned long flags;
139 int retval = -ENXIO;
140
141 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
142 if (!adapter)
143 goto out;
144
145 read_lock_irqsave(&zfcp_data.config_lock, flags);
146 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
147 if (unit &&
148 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_REGISTERED)) {
149 sdp->hostdata = unit;
150 unit->device = sdp;
151 zfcp_unit_get(unit);
152 retval = 0;
153 }
154 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
155out:
367 return retval; 156 return retval;
368} 157}
369 158
370/**
371 * zfcp_scsi_eh_abort_handler - abort the specified SCSI command
372 * @scpnt: pointer to scsi_cmnd to be aborted
373 * Return: SUCCESS - command has been aborted and cleaned up in internal
374 * bookkeeping, SCSI stack won't be called for aborted command
375 * FAILED - otherwise
376 *
377 * We do not need to care for a SCSI command which completes normally
378 * but late during this abort routine runs. We are allowed to return
379 * late commands to the SCSI stack. It tracks the state of commands and
380 * will handle late commands. (Usually, the normal completion of late
381 * commands is ignored with respect to the running abort operation.)
382 */
383static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 159static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 160{
385 struct Scsi_Host *scsi_host; 161 struct Scsi_Host *scsi_host;
@@ -387,44 +163,37 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
387 struct zfcp_unit *unit; 163 struct zfcp_unit *unit;
388 struct zfcp_fsf_req *fsf_req; 164 struct zfcp_fsf_req *fsf_req;
389 unsigned long flags; 165 unsigned long flags;
390 unsigned long old_req_id; 166 unsigned long old_req_id = (unsigned long) scpnt->host_scribble;
391 int retval = SUCCESS; 167 int retval = SUCCESS;
392 168
393 scsi_host = scpnt->device->host; 169 scsi_host = scpnt->device->host;
394 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; 170 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
395 unit = (struct zfcp_unit *) scpnt->device->hostdata; 171 unit = scpnt->device->hostdata;
396
397 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
398 scpnt, zfcp_get_busid_by_adapter(adapter));
399 172
400 /* avoid race condition between late normal completion and abort */ 173 /* avoid race condition between late normal completion and abort */
401 write_lock_irqsave(&adapter->abort_lock, flags); 174 write_lock_irqsave(&adapter->abort_lock, flags);
402 175
403 /* Check whether corresponding fsf_req is still pending */ 176 /* Check whether corresponding fsf_req is still pending */
404 spin_lock(&adapter->req_list_lock); 177 spin_lock(&adapter->req_list_lock);
405 fsf_req = zfcp_reqlist_find(adapter, 178 fsf_req = zfcp_reqlist_find(adapter, old_req_id);
406 (unsigned long) scpnt->host_scribble);
407 spin_unlock(&adapter->req_list_lock); 179 spin_unlock(&adapter->req_list_lock);
408 if (!fsf_req) { 180 if (!fsf_req) {
409 write_unlock_irqrestore(&adapter->abort_lock, flags); 181 write_unlock_irqrestore(&adapter->abort_lock, flags);
410 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0); 182 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0);
411 retval = SUCCESS; 183 return retval;
412 goto out;
413 } 184 }
414 fsf_req->data = 0; 185 fsf_req->data = NULL;
415 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; 186 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
416 old_req_id = fsf_req->req_id;
417 187
418 /* don't access old fsf_req after releasing the abort_lock */ 188 /* don't access old fsf_req after releasing the abort_lock */
419 write_unlock_irqrestore(&adapter->abort_lock, flags); 189 write_unlock_irqrestore(&adapter->abort_lock, flags);
420 190
421 fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0); 191 fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0);
422 if (!fsf_req) { 192 if (!fsf_req) {
423 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
424 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 193 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
425 old_req_id); 194 old_req_id);
426 retval = FAILED; 195 retval = FAILED;
427 goto out; 196 return retval;
428 } 197 }
429 198
430 __wait_event(fsf_req->completion_wq, 199 __wait_event(fsf_req->completion_wq,
@@ -432,66 +201,29 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
432 201
433 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 202 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
434 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0); 203 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0);
435 retval = SUCCESS;
436 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 204 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
437 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0); 205 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0);
438 retval = SUCCESS;
439 } else { 206 } else {
440 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0); 207 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0);
441 retval = FAILED; 208 retval = FAILED;
442 } 209 }
443 zfcp_fsf_req_free(fsf_req); 210 zfcp_fsf_req_free(fsf_req);
444 out:
445 return retval;
446}
447
448static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
449{
450 int retval;
451 struct zfcp_unit *unit = scpnt->device->hostdata;
452 211
453 if (!unit) { 212 return retval;
454 WARN_ON(1);
455 return SUCCESS;
456 }
457 retval = zfcp_task_management_function(unit,
458 FCP_LOGICAL_UNIT_RESET,
459 scpnt);
460 return retval ? FAILED : SUCCESS;
461}
462
463static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
464{
465 int retval;
466 struct zfcp_unit *unit = scpnt->device->hostdata;
467
468 if (!unit) {
469 WARN_ON(1);
470 return SUCCESS;
471 }
472 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
473 return retval ? FAILED : SUCCESS;
474} 213}
475 214
476static int 215static int zfcp_task_mgmt_function(struct zfcp_unit *unit, u8 tm_flags,
477zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, 216 struct scsi_cmnd *scpnt)
478 struct scsi_cmnd *scpnt)
479{ 217{
480 struct zfcp_adapter *adapter = unit->port->adapter; 218 struct zfcp_adapter *adapter = unit->port->adapter;
481 struct zfcp_fsf_req *fsf_req; 219 struct zfcp_fsf_req *fsf_req;
482 int retval = 0; 220 int retval = SUCCESS;
483 221
484 /* issue task management function */ 222 /* issue task management function */
485 fsf_req = zfcp_fsf_send_fcp_command_task_management 223 fsf_req = zfcp_fsf_send_fcp_ctm(adapter, unit, tm_flags, 0);
486 (adapter, unit, tm_flags, 0);
487 if (!fsf_req) { 224 if (!fsf_req) {
488 ZFCP_LOG_INFO("error: creation of task management request "
489 "failed for unit 0x%016Lx on port 0x%016Lx on "
490 "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
491 zfcp_get_busid_by_adapter(adapter));
492 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt); 225 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
493 retval = -ENOMEM; 226 return FAILED;
494 goto out;
495 } 227 }
496 228
497 __wait_event(fsf_req->completion_wq, 229 __wait_event(fsf_req->completion_wq,
@@ -502,87 +234,90 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
502 */ 234 */
503 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 235 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
504 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); 236 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
505 retval = -EIO; 237 retval = FAILED;
506 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { 238 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
507 zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt); 239 zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
508 retval = -ENOTSUPP; 240 retval = FAILED;
509 } else 241 } else
510 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt); 242 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
511 243
512 zfcp_fsf_req_free(fsf_req); 244 zfcp_fsf_req_free(fsf_req);
513 out: 245
514 return retval; 246 return retval;
515} 247}
516 248
517/** 249static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
518 * zfcp_scsi_eh_host_reset_handler - handler for host reset 250{
519 */ 251 struct zfcp_unit *unit = scpnt->device->hostdata;
252
253 if (!unit) {
254 WARN_ON(1);
255 return SUCCESS;
256 }
257 return zfcp_task_mgmt_function(unit, FCP_LOGICAL_UNIT_RESET, scpnt);
258}
259
260static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
261{
262 struct zfcp_unit *unit = scpnt->device->hostdata;
263
264 if (!unit) {
265 WARN_ON(1);
266 return SUCCESS;
267 }
268 return zfcp_task_mgmt_function(unit, FCP_TARGET_RESET, scpnt);
269}
270
520static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 271static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
521{ 272{
522 struct zfcp_unit *unit; 273 struct zfcp_unit *unit;
523 struct zfcp_adapter *adapter; 274 struct zfcp_adapter *adapter;
524 275
525 unit = (struct zfcp_unit*) scpnt->device->hostdata; 276 unit = scpnt->device->hostdata;
526 adapter = unit->port->adapter; 277 adapter = unit->port->adapter;
527
528 ZFCP_LOG_NORMAL("host reset because of problems with "
529 "unit 0x%016Lx on port 0x%016Lx, adapter %s\n",
530 unit->fcp_lun, unit->port->wwpn,
531 zfcp_get_busid_by_adapter(unit->port->adapter));
532
533 zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt); 278 zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt);
534 zfcp_erp_wait(adapter); 279 zfcp_erp_wait(adapter);
535 280
536 return SUCCESS; 281 return SUCCESS;
537} 282}
538 283
539int 284int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
540zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
541{ 285{
542 int retval = 0; 286 struct ccw_dev_id dev_id;
543 static unsigned int unique_id = 0;
544 287
545 if (adapter->scsi_host) 288 if (adapter->scsi_host)
546 goto out; 289 return 0;
547 290
291 ccw_device_get_id(adapter->ccw_device, &dev_id);
548 /* register adapter as SCSI host with mid layer of SCSI stack */ 292 /* register adapter as SCSI host with mid layer of SCSI stack */
549 adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template, 293 adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
550 sizeof (struct zfcp_adapter *)); 294 sizeof (struct zfcp_adapter *));
551 if (!adapter->scsi_host) { 295 if (!adapter->scsi_host) {
552 ZFCP_LOG_NORMAL("error: registration with SCSI stack failed " 296 dev_err(&adapter->ccw_device->dev,
553 "for adapter %s ", 297 "registration with SCSI stack failed.");
554 zfcp_get_busid_by_adapter(adapter)); 298 return -EIO;
555 retval = -EIO;
556 goto out;
557 } 299 }
558 ZFCP_LOG_DEBUG("host registered, scsi_host=%p\n", adapter->scsi_host);
559 300
560 /* tell the SCSI stack some characteristics of this adapter */ 301 /* tell the SCSI stack some characteristics of this adapter */
561 adapter->scsi_host->max_id = 1; 302 adapter->scsi_host->max_id = 1;
562 adapter->scsi_host->max_lun = 1; 303 adapter->scsi_host->max_lun = 1;
563 adapter->scsi_host->max_channel = 0; 304 adapter->scsi_host->max_channel = 0;
564 adapter->scsi_host->unique_id = unique_id++; /* FIXME */ 305 adapter->scsi_host->unique_id = dev_id.devno;
565 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH; 306 adapter->scsi_host->max_cmd_len = 255;
566 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; 307 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
567 308
568 /*
569 * save a pointer to our own adapter data structure within
570 * hostdata field of SCSI host data structure
571 */
572 adapter->scsi_host->hostdata[0] = (unsigned long) adapter; 309 adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
573 310
574 if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) { 311 if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
575 scsi_host_put(adapter->scsi_host); 312 scsi_host_put(adapter->scsi_host);
576 retval = -EIO; 313 return -EIO;
577 goto out;
578 } 314 }
579 atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status); 315 atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
580 out: 316
581 return retval; 317 return 0;
582} 318}
583 319
584void 320void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
585zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
586{ 321{
587 struct Scsi_Host *shost; 322 struct Scsi_Host *shost;
588 struct zfcp_port *port; 323 struct zfcp_port *port;
@@ -590,10 +325,12 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
590 shost = adapter->scsi_host; 325 shost = adapter->scsi_host;
591 if (!shost) 326 if (!shost)
592 return; 327 return;
328
593 read_lock_irq(&zfcp_data.config_lock); 329 read_lock_irq(&zfcp_data.config_lock);
594 list_for_each_entry(port, &adapter->port_list_head, list) 330 list_for_each_entry(port, &adapter->port_list_head, list)
595 if (port->rport) 331 if (port->rport)
596 port->rport = NULL; 332 port->rport = NULL;
333
597 read_unlock_irq(&zfcp_data.config_lock); 334 read_unlock_irq(&zfcp_data.config_lock);
598 fc_remove_host(shost); 335 fc_remove_host(shost);
599 scsi_remove_host(shost); 336 scsi_remove_host(shost);
@@ -604,9 +341,6 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
604 return; 341 return;
605} 342}
606 343
607/*
608 * Support functions for FC transport class
609 */
610static struct fc_host_statistics* 344static struct fc_host_statistics*
611zfcp_init_fc_host_stats(struct zfcp_adapter *adapter) 345zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
612{ 346{
@@ -622,13 +356,12 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
622 return adapter->fc_stats; 356 return adapter->fc_stats;
623} 357}
624 358
625static void 359static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
626zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats, 360 struct fsf_qtcb_bottom_port *data,
627 struct fsf_qtcb_bottom_port *data, 361 struct fsf_qtcb_bottom_port *old)
628 struct fsf_qtcb_bottom_port *old)
629{ 362{
630 fc_stats->seconds_since_last_reset = data->seconds_since_last_reset - 363 fc_stats->seconds_since_last_reset =
631 old->seconds_since_last_reset; 364 data->seconds_since_last_reset - old->seconds_since_last_reset;
632 fc_stats->tx_frames = data->tx_frames - old->tx_frames; 365 fc_stats->tx_frames = data->tx_frames - old->tx_frames;
633 fc_stats->tx_words = data->tx_words - old->tx_words; 366 fc_stats->tx_words = data->tx_words - old->tx_words;
634 fc_stats->rx_frames = data->rx_frames - old->rx_frames; 367 fc_stats->rx_frames = data->rx_frames - old->rx_frames;
@@ -639,26 +372,25 @@ zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
639 fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames; 372 fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
640 fc_stats->link_failure_count = data->link_failure - old->link_failure; 373 fc_stats->link_failure_count = data->link_failure - old->link_failure;
641 fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync; 374 fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
642 fc_stats->loss_of_signal_count = data->loss_of_signal - 375 fc_stats->loss_of_signal_count =
643 old->loss_of_signal; 376 data->loss_of_signal - old->loss_of_signal;
644 fc_stats->prim_seq_protocol_err_count = data->psp_error_counts - 377 fc_stats->prim_seq_protocol_err_count =
645 old->psp_error_counts; 378 data->psp_error_counts - old->psp_error_counts;
646 fc_stats->invalid_tx_word_count = data->invalid_tx_words - 379 fc_stats->invalid_tx_word_count =
647 old->invalid_tx_words; 380 data->invalid_tx_words - old->invalid_tx_words;
648 fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs; 381 fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
649 fc_stats->fcp_input_requests = data->input_requests - 382 fc_stats->fcp_input_requests =
650 old->input_requests; 383 data->input_requests - old->input_requests;
651 fc_stats->fcp_output_requests = data->output_requests - 384 fc_stats->fcp_output_requests =
652 old->output_requests; 385 data->output_requests - old->output_requests;
653 fc_stats->fcp_control_requests = data->control_requests - 386 fc_stats->fcp_control_requests =
654 old->control_requests; 387 data->control_requests - old->control_requests;
655 fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb; 388 fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
656 fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb; 389 fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
657} 390}
658 391
659static void 392static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
660zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats, 393 struct fsf_qtcb_bottom_port *data)
661 struct fsf_qtcb_bottom_port *data)
662{ 394{
663 fc_stats->seconds_since_last_reset = data->seconds_since_last_reset; 395 fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
664 fc_stats->tx_frames = data->tx_frames; 396 fc_stats->tx_frames = data->tx_frames;
@@ -682,22 +414,14 @@ zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
682 fc_stats->fcp_output_megabytes = data->output_mb; 414 fc_stats->fcp_output_megabytes = data->output_mb;
683} 415}
684 416
685/** 417static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
686 * zfcp_get_fc_host_stats - provide fc_host_statistics for scsi_transport_fc
687 *
688 * assumption: scsi_transport_fc synchronizes calls of
689 * get_fc_host_stats and reset_fc_host_stats
690 * (XXX to be checked otherwise introduce locking)
691 */
692static struct fc_host_statistics *
693zfcp_get_fc_host_stats(struct Scsi_Host *shost)
694{ 418{
695 struct zfcp_adapter *adapter; 419 struct zfcp_adapter *adapter;
696 struct fc_host_statistics *fc_stats; 420 struct fc_host_statistics *fc_stats;
697 struct fsf_qtcb_bottom_port *data; 421 struct fsf_qtcb_bottom_port *data;
698 int ret; 422 int ret;
699 423
700 adapter = (struct zfcp_adapter *)shost->hostdata[0]; 424 adapter = (struct zfcp_adapter *)host->hostdata[0];
701 fc_stats = zfcp_init_fc_host_stats(adapter); 425 fc_stats = zfcp_init_fc_host_stats(adapter);
702 if (!fc_stats) 426 if (!fc_stats)
703 return NULL; 427 return NULL;
@@ -709,26 +433,25 @@ zfcp_get_fc_host_stats(struct Scsi_Host *shost)
709 ret = zfcp_fsf_exchange_port_data_sync(adapter, data); 433 ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
710 if (ret) { 434 if (ret) {
711 kfree(data); 435 kfree(data);
712 return NULL; /* XXX return zeroed fc_stats? */ 436 return NULL;
713 } 437 }
714 438
715 if (adapter->stats_reset && 439 if (adapter->stats_reset &&
716 ((jiffies/HZ - adapter->stats_reset) < 440 ((jiffies/HZ - adapter->stats_reset) <
717 data->seconds_since_last_reset)) { 441 data->seconds_since_last_reset))
718 zfcp_adjust_fc_host_stats(fc_stats, data, 442 zfcp_adjust_fc_host_stats(fc_stats, data,
719 adapter->stats_reset_data); 443 adapter->stats_reset_data);
720 } else 444 else
721 zfcp_set_fc_host_stats(fc_stats, data); 445 zfcp_set_fc_host_stats(fc_stats, data);
722 446
723 kfree(data); 447 kfree(data);
724 return fc_stats; 448 return fc_stats;
725} 449}
726 450
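[Editor's note] A minimal user-space sketch (not driver code) of the window test in zfcp_get_fc_host_stats above: the saved baseline is subtracted only while the adapter has not restarted its own counters since the last user-requested reset; otherwise the raw counters are reported. All values are made up.

#include <stdio.h>

int main(void)
{
	unsigned long now = 5000;		/* stand-in for jiffies/HZ */
	unsigned long stats_reset = 4900;	/* time of the user reset */
	unsigned long secs_since_adapter_reset = 300;

	if (stats_reset &&
	    (now - stats_reset) < secs_since_adapter_reset)
		printf("adjust: subtract saved baseline\n");
	else
		printf("set: report raw adapter counters\n");
	return 0;
}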
727static void 451static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
728zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
729{ 452{
730 struct zfcp_adapter *adapter; 453 struct zfcp_adapter *adapter;
731 struct fsf_qtcb_bottom_port *data, *old_data; 454 struct fsf_qtcb_bottom_port *data;
732 int ret; 455 int ret;
733 456
734 adapter = (struct zfcp_adapter *)shost->hostdata[0]; 457 adapter = (struct zfcp_adapter *)shost->hostdata[0];
@@ -737,17 +460,33 @@ zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
737 return; 460 return;
738 461
739 ret = zfcp_fsf_exchange_port_data_sync(adapter, data); 462 ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
740 if (ret) { 463 if (ret)
741 kfree(data); 464 kfree(data);
742 } else { 465 else {
743 adapter->stats_reset = jiffies/HZ; 466 adapter->stats_reset = jiffies/HZ;
744 old_data = adapter->stats_reset_data; 467 kfree(adapter->stats_reset_data);
745 adapter->stats_reset_data = data; /* finally freed in 468 adapter->stats_reset_data = data; /* finally freed in
746 adater_dequeue */ 469 adapter_dequeue */
747 kfree(old_data);
748 } 470 }
749} 471}
750 472
473static void zfcp_get_host_port_state(struct Scsi_Host *shost)
474{
475 struct zfcp_adapter *adapter =
476 (struct zfcp_adapter *)shost->hostdata[0];
477 int status = atomic_read(&adapter->status);
478
479 if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
480 !(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
481 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
482 else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
483 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
484 else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
485 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
486 else
487 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
488}
489
751static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) 490static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
752{ 491{
753 rport->dev_loss_tmo = timeout; 492 rport->dev_loss_tmo = timeout;
@@ -770,6 +509,8 @@ struct fc_function_template zfcp_transport_functions = {
770 .get_fc_host_stats = zfcp_get_fc_host_stats, 509 .get_fc_host_stats = zfcp_get_fc_host_stats,
771 .reset_fc_host_stats = zfcp_reset_fc_host_stats, 510 .reset_fc_host_stats = zfcp_reset_fc_host_stats,
772 .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, 511 .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
512 .get_host_port_state = zfcp_get_host_port_state,
513 .show_host_port_state = 1,
773 /* no functions registered for following dynamic attributes but 514 /* no functions registered for following dynamic attributes but
774 directly set by LLDD */ 515 directly set by LLDD */
775 .show_host_port_type = 1, 516 .show_host_port_type = 1,
@@ -778,149 +519,26 @@ struct fc_function_template zfcp_transport_functions = {
778 .disable_target_scan = 1, 519 .disable_target_scan = 1,
779}; 520};
780 521
781/** 522struct zfcp_data zfcp_data = {
782 * ZFCP_DEFINE_SCSI_ATTR 523 .scsi_host_template = {
783 * @_name: name of show attribute 524 .name = "zfcp",
784 * @_format: format string 525 .module = THIS_MODULE,
785 * @_value: value to print 526 .proc_name = "zfcp",
786 * 527 .slave_alloc = zfcp_scsi_slave_alloc,
787 * Generates attribute for a unit. 528 .slave_configure = zfcp_scsi_slave_configure,
788 */ 529 .slave_destroy = zfcp_scsi_slave_destroy,
789#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \ 530 .queuecommand = zfcp_scsi_queuecommand,
790static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, struct device_attribute *attr, \ 531 .eh_abort_handler = zfcp_scsi_eh_abort_handler,
791 char *buf) \ 532 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
792{ \ 533 .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
793 struct scsi_device *sdev; \ 534 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
794 struct zfcp_unit *unit; \ 535 .can_queue = 4096,
795 \ 536 .this_id = -1,
796 sdev = to_scsi_device(dev); \ 537 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
797 unit = sdev->hostdata; \ 538 .cmd_per_lun = 1,
798 return sprintf(buf, _format, _value); \ 539 .use_clustering = 1,
799} \ 540 .sdev_attrs = zfcp_sysfs_sdev_attrs,
800 \ 541 .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8),
801static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); 542 .shost_attrs = zfcp_sysfs_shost_attrs,
802 543 },
803ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", zfcp_get_busid_by_unit(unit));
804ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
805ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
806
807static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
808 &dev_attr_fcp_lun,
809 &dev_attr_wwpn,
810 &dev_attr_hba_id,
811 NULL
812};
813
814static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
815 struct device_attribute *attr,
816 char *buf)
817{
818 struct Scsi_Host *scsi_host = dev_to_shost(dev);
819 struct fsf_qtcb_bottom_port *qtcb_port;
820 int retval;
821 struct zfcp_adapter *adapter;
822
823 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
824 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
825 return -EOPNOTSUPP;
826
827 qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
828 if (!qtcb_port)
829 return -ENOMEM;
830
831 retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
832 if (!retval)
833 retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
834 qtcb_port->cb_util, qtcb_port->a_util);
835 kfree(qtcb_port);
836 return retval;
837}
838
839static int zfcp_sysfs_adapter_ex_config(struct device *dev,
840 struct fsf_statistics_info *stat_inf)
841{
842 int retval;
843 struct fsf_qtcb_bottom_config *qtcb_config;
844 struct Scsi_Host *scsi_host = dev_to_shost(dev);
845 struct zfcp_adapter *adapter;
846
847 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
848 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
849 return -EOPNOTSUPP;
850
851 qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
852 GFP_KERNEL);
853 if (!qtcb_config)
854 return -ENOMEM;
855
856 retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
857 if (!retval)
858 *stat_inf = qtcb_config->stat_info;
859
860 kfree(qtcb_config);
861 return retval;
862}
863
864static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev,
865 struct device_attribute *attr,
866 char *buf)
867{
868 struct fsf_statistics_info stat_info;
869 int retval;
870
871 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
872 if (retval)
873 return retval;
874
875 return sprintf(buf, "%llu %llu %llu\n",
876 (unsigned long long) stat_info.input_req,
877 (unsigned long long) stat_info.output_req,
878 (unsigned long long) stat_info.control_req);
879}
880
881static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev,
882 struct device_attribute *attr,
883 char *buf)
884{
885 struct fsf_statistics_info stat_info;
886 int retval;
887
888 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
889 if (retval)
890 return retval;
891
892 return sprintf(buf, "%llu %llu\n",
893 (unsigned long long) stat_info.input_mb,
894 (unsigned long long) stat_info.output_mb);
895}
896
897static ssize_t zfcp_sysfs_adapter_sec_active_show(struct device *dev,
898 struct device_attribute *attr,
899 char *buf)
900{
901 struct fsf_statistics_info stat_info;
902 int retval;
903
904 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
905 if (retval)
906 return retval;
907
908 return sprintf(buf, "%llu\n",
909 (unsigned long long) stat_info.seconds_act);
910}
911
912static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
913static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL);
914static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL);
915static DEVICE_ATTR(seconds_active, S_IRUGO,
916 zfcp_sysfs_adapter_sec_active_show, NULL);
917
918static struct device_attribute *zfcp_a_stats_attrs[] = {
919 &dev_attr_utilization,
920 &dev_attr_requests,
921 &dev_attr_megabytes,
922 &dev_attr_seconds_active,
923 NULL
924}; 544};
925
926#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
new file mode 100644
index 000000000000..2e85c6c49e7d
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -0,0 +1,496 @@
1/*
2 * zfcp device driver
3 *
4 * sysfs attributes.
5 *
6 * Copyright IBM Corporation 2008
7 */
8
9#include "zfcp_ext.h"
10
11#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
12struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
13 _show, _store)
14#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \
15static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
16 struct device_attribute *at,\
17 char *buf) \
18{ \
19 struct _feat_def *_feat = dev_get_drvdata(dev); \
20 \
21 return sprintf(buf, _format, _value); \
22} \
23static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
24 zfcp_sysfs_##_feat##_##_name##_show, NULL);
25
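[Editor's note] An illustrative hand expansion (not additional driver code, and not standalone-runnable since it needs kernel headers) of what the first invocation below, ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", atomic_read(&adapter->status)), produces once both macros are substituted:

static ssize_t zfcp_sysfs_adapter_status_show(struct device *dev,
					      struct device_attribute *at,
					      char *buf)
{
	struct zfcp_adapter *adapter = dev_get_drvdata(dev);

	return sprintf(buf, "0x%08x\n", atomic_read(&adapter->status));
}
static struct device_attribute dev_attr_adapter_status =
	__ATTR(status, S_IRUGO, zfcp_sysfs_adapter_status_show, NULL);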
26ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
27 atomic_read(&adapter->status));
28ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
29 adapter->peer_wwnn);
30ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
31 adapter->peer_wwpn);
32ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
33 adapter->peer_d_id);
34ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
35 adapter->hydra_version);
36ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n",
37 adapter->fsf_lic_version);
38ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n",
39 adapter->hardware_version);
40ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n",
41 (atomic_read(&adapter->status) &
42 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
43
44ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
45 atomic_read(&port->status));
46ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
47 (atomic_read(&port->status) &
48 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
49ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
50 (atomic_read(&port->status) &
51 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
52
53ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
54 atomic_read(&unit->status));
55ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
56 (atomic_read(&unit->status) &
57 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
58ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
59 (atomic_read(&unit->status) &
60 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
61ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
62 (atomic_read(&unit->status) &
63 ZFCP_STATUS_UNIT_SHARED) != 0);
64ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
65 (atomic_read(&unit->status) &
66 ZFCP_STATUS_UNIT_READONLY) != 0);
67
68#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \
69static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
70 struct device_attribute *attr, \
71 char *buf) \
72{ \
73 struct _feat_def *_feat = dev_get_drvdata(dev); \
74 \
75 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
76 return sprintf(buf, "1\n"); \
77 else \
78 return sprintf(buf, "0\n"); \
79} \
80static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
81 struct device_attribute *attr,\
82 const char *buf, size_t count)\
83{ \
84 struct _feat_def *_feat = dev_get_drvdata(dev); \
85 unsigned long val; \
86 int retval = 0; \
87 \
88 down(&zfcp_data.config_sema); \
89 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \
90 retval = -EBUSY; \
91 goto out; \
92 } \
93 \
94 if (strict_strtoul(buf, 0, &val) || val != 0) { \
95 retval = -EINVAL; \
96 goto out; \
97 } \
98 \
99 zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \
100 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
101 zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \
102 _reopen_id, NULL); \
103 zfcp_erp_wait(_adapter); \
104out: \
105 up(&zfcp_data.config_sema); \
106 return retval ? retval : (ssize_t) count; \
107} \
108static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
109 zfcp_sysfs_##_feat##_failed_show, \
110 zfcp_sysfs_##_feat##_failed_store);
111
112ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, 44, 93);
113ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, 45, 96);
114ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, 46, 97);
115
116static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
117 struct device_attribute *attr,
118 const char *buf, size_t count)
119{
120 struct zfcp_adapter *adapter = dev_get_drvdata(dev);
121 int ret;
122
123 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
124 return -EBUSY;
125
126 ret = zfcp_scan_ports(adapter);
127 return ret ? ret : (ssize_t) count;
128}
129static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
130 zfcp_sysfs_port_rescan_store);
131
132static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
133 struct device_attribute *attr,
134 const char *buf, size_t count)
135{
136 struct zfcp_adapter *adapter = dev_get_drvdata(dev);
137 struct zfcp_port *port;
138 wwn_t wwpn;
139 int retval = 0;
140
141 down(&zfcp_data.config_sema);
142 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
143 retval = -EBUSY;
144 goto out;
145 }
146
147 if (strict_strtoull(buf, 0, &wwpn)) {
148 retval = -EINVAL;
149 goto out;
150 }
151
152 write_lock_irq(&zfcp_data.config_lock);
153 port = zfcp_get_port_by_wwpn(adapter, wwpn);
154 if (port && (atomic_read(&port->refcount) == 0)) {
155 zfcp_port_get(port);
156 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
157 list_move(&port->list, &adapter->port_remove_lh);
158 } else
159 port = NULL;
160 write_unlock_irq(&zfcp_data.config_lock);
161
162 if (!port) {
163 retval = -ENXIO;
164 goto out;
165 }
166
167 zfcp_erp_port_shutdown(port, 0, 92, NULL);
168 zfcp_erp_wait(adapter);
169 zfcp_port_put(port);
170 zfcp_port_dequeue(port);
171 out:
172 up(&zfcp_data.config_sema);
173 return retval ? retval : (ssize_t) count;
174}
175static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
176 zfcp_sysfs_port_remove_store);
177
178static struct attribute *zfcp_adapter_attrs[] = {
179 &dev_attr_adapter_failed.attr,
180 &dev_attr_adapter_in_recovery.attr,
181 &dev_attr_adapter_port_remove.attr,
182 &dev_attr_adapter_port_rescan.attr,
183 &dev_attr_adapter_peer_wwnn.attr,
184 &dev_attr_adapter_peer_wwpn.attr,
185 &dev_attr_adapter_peer_d_id.attr,
186 &dev_attr_adapter_card_version.attr,
187 &dev_attr_adapter_lic_version.attr,
188 &dev_attr_adapter_status.attr,
189 &dev_attr_adapter_hardware_version.attr,
190 NULL
191};
192
193struct attribute_group zfcp_sysfs_adapter_attrs = {
194 .attrs = zfcp_adapter_attrs,
195};
196
197static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
198 struct device_attribute *attr,
199 const char *buf, size_t count)
200{
201 struct zfcp_port *port = dev_get_drvdata(dev);
202 struct zfcp_unit *unit;
203 fcp_lun_t fcp_lun;
204 int retval = -EINVAL;
205
206 down(&zfcp_data.config_sema);
207 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
208 retval = -EBUSY;
209 goto out;
210 }
211
212 if (strict_strtoull(buf, 0, &fcp_lun))
213 goto out;
214
215 unit = zfcp_unit_enqueue(port, fcp_lun);
216 if (IS_ERR(unit))
217 goto out;
218
219 retval = 0;
220
221 zfcp_erp_unit_reopen(unit, 0, 94, NULL);
222 zfcp_erp_wait(unit->port->adapter);
223 zfcp_unit_put(unit);
224out:
225 up(&zfcp_data.config_sema);
226 return retval ? retval : (ssize_t) count;
227}
228static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
229
230static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
231 struct device_attribute *attr,
232 const char *buf, size_t count)
233{
234 struct zfcp_port *port = dev_get_drvdata(dev);
235 struct zfcp_unit *unit;
236 fcp_lun_t fcp_lun;
237 int retval = 0;
238
239 down(&zfcp_data.config_sema);
240 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
241 retval = -EBUSY;
242 goto out;
243 }
244
245 if (strict_strtoull(buf, 0, &fcp_lun)) {
246 retval = -EINVAL;
247 goto out;
248 }
249
250 write_lock_irq(&zfcp_data.config_lock);
251 unit = zfcp_get_unit_by_lun(port, fcp_lun);
252 if (unit && (atomic_read(&unit->refcount) == 0)) {
253 zfcp_unit_get(unit);
254 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
255 list_move(&unit->list, &port->unit_remove_lh);
256 } else
257 unit = NULL;
258
259 write_unlock_irq(&zfcp_data.config_lock);
260
261 if (!unit) {
262 retval = -ENXIO;
263 goto out;
264 }
265
266 zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
267 zfcp_erp_wait(unit->port->adapter);
268 zfcp_unit_put(unit);
269 zfcp_unit_dequeue(unit);
270out:
271 up(&zfcp_data.config_sema);
272 return retval ? retval : (ssize_t) count;
273}
274static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
275
276static struct attribute *zfcp_port_ns_attrs[] = {
277 &dev_attr_port_failed.attr,
278 &dev_attr_port_in_recovery.attr,
279 &dev_attr_port_status.attr,
280 &dev_attr_port_access_denied.attr,
281 NULL
282};
283
284/**
285 * zfcp_sysfs_ns_port_attrs - sysfs attributes for the nameserver port
286 */
287struct attribute_group zfcp_sysfs_ns_port_attrs = {
288 .attrs = zfcp_port_ns_attrs,
289};
290
291static struct attribute *zfcp_port_no_ns_attrs[] = {
292 &dev_attr_unit_add.attr,
293 &dev_attr_unit_remove.attr,
294 &dev_attr_port_failed.attr,
295 &dev_attr_port_in_recovery.attr,
296 &dev_attr_port_status.attr,
297 &dev_attr_port_access_denied.attr,
298 NULL
299};
300
301/**
302 * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
303 */
304struct attribute_group zfcp_sysfs_port_attrs = {
305 .attrs = zfcp_port_no_ns_attrs,
306};
307
308static struct attribute *zfcp_unit_attrs[] = {
309 &dev_attr_unit_failed.attr,
310 &dev_attr_unit_in_recovery.attr,
311 &dev_attr_unit_status.attr,
312 &dev_attr_unit_access_denied.attr,
313 &dev_attr_unit_access_shared.attr,
314 &dev_attr_unit_access_readonly.attr,
315 NULL
316};
317
318struct attribute_group zfcp_sysfs_unit_attrs = {
319 .attrs = zfcp_unit_attrs,
320};
321
322#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
323static ssize_t \
324zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
325 struct device_attribute *attr, \
326 char *buf) { \
327 struct scsi_device *sdev = to_scsi_device(dev); \
328 struct zfcp_unit *unit = sdev->hostdata; \
329 struct zfcp_latencies *lat = &unit->latencies; \
330 struct zfcp_adapter *adapter = unit->port->adapter; \
331 unsigned long flags; \
332 unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
333 \
334 spin_lock_irqsave(&lat->lock, flags); \
335 fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
336 fmin = lat->_name.fabric.min * adapter->timer_ticks; \
337 fmax = lat->_name.fabric.max * adapter->timer_ticks; \
338 csum = lat->_name.channel.sum * adapter->timer_ticks; \
339 cmin = lat->_name.channel.min * adapter->timer_ticks; \
340 cmax = lat->_name.channel.max * adapter->timer_ticks; \
341 cc = lat->_name.counter; \
342 spin_unlock_irqrestore(&lat->lock, flags); \
343 \
344 do_div(fsum, 1000); \
345 do_div(fmin, 1000); \
346 do_div(fmax, 1000); \
347 do_div(csum, 1000); \
348 do_div(cmin, 1000); \
349 do_div(cmax, 1000); \
350 \
351 return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
352 fmin, fmax, fsum, cmin, cmax, csum, cc); \
353} \
354static ssize_t \
355zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
356 struct device_attribute *attr, \
357 const char *buf, size_t count) \
358{ \
359 struct scsi_device *sdev = to_scsi_device(dev); \
360 struct zfcp_unit *unit = sdev->hostdata; \
361 struct zfcp_latencies *lat = &unit->latencies; \
362 unsigned long flags; \
363 \
364 spin_lock_irqsave(&lat->lock, flags); \
365 lat->_name.fabric.sum = 0; \
366 lat->_name.fabric.min = 0xFFFFFFFF; \
367 lat->_name.fabric.max = 0; \
368 lat->_name.channel.sum = 0; \
369 lat->_name.channel.min = 0xFFFFFFFF; \
370 lat->_name.channel.max = 0; \
371 lat->_name.counter = 0; \
372 spin_unlock_irqrestore(&lat->lock, flags); \
373 \
374 return (ssize_t) count; \
375} \
376static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \
377 zfcp_sysfs_unit_##_name##_latency_show, \
378 zfcp_sysfs_unit_##_name##_latency_store);
379
380ZFCP_DEFINE_LATENCY_ATTR(read);
381ZFCP_DEFINE_LATENCY_ATTR(write);
382ZFCP_DEFINE_LATENCY_ATTR(cmd);
383
384#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
385static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
386 struct device_attribute *attr,\
387 char *buf) \
388{ \
389 struct scsi_device *sdev = to_scsi_device(dev); \
390 struct zfcp_unit *unit = sdev->hostdata; \
391 \
392 return sprintf(buf, _format, _value); \
393} \
394static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
395
396ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
397 unit->port->adapter->ccw_device->dev.bus_id);
398ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
399ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
400
401struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
402 &dev_attr_fcp_lun,
403 &dev_attr_wwpn,
404 &dev_attr_hba_id,
405 &dev_attr_read_latency,
406 &dev_attr_write_latency,
407 &dev_attr_cmd_latency,
408 NULL
409};
410
411static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
412 struct device_attribute *attr,
413 char *buf)
414{
415 struct Scsi_Host *scsi_host = dev_to_shost(dev);
416 struct fsf_qtcb_bottom_port *qtcb_port;
417 struct zfcp_adapter *adapter;
418 int retval;
419
420 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
421 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
422 return -EOPNOTSUPP;
423
424 qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
425 if (!qtcb_port)
426 return -ENOMEM;
427
428 retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
429 if (!retval)
430 retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
431 qtcb_port->cb_util, qtcb_port->a_util);
432 kfree(qtcb_port);
433 return retval;
434}
435static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
436
437static int zfcp_sysfs_adapter_ex_config(struct device *dev,
438 struct fsf_statistics_info *stat_inf)
439{
440 struct Scsi_Host *scsi_host = dev_to_shost(dev);
441 struct fsf_qtcb_bottom_config *qtcb_config;
442 struct zfcp_adapter *adapter;
443 int retval;
444
445 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
446 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
447 return -EOPNOTSUPP;
448
449 qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
450 GFP_KERNEL);
451 if (!qtcb_config)
452 return -ENOMEM;
453
454 retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
455 if (!retval)
456 *stat_inf = qtcb_config->stat_info;
457
458 kfree(qtcb_config);
459 return retval;
460}
461
462#define ZFCP_SHOST_ATTR(_name, _format, _arg...) \
463static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
464 struct device_attribute *attr,\
465 char *buf) \
466{ \
467 struct fsf_statistics_info stat_info; \
468 int retval; \
469 \
470 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \
471 if (retval) \
472 return retval; \
473 \
474 return sprintf(buf, _format, ## _arg); \
475} \
476static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
477
478ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
479 (unsigned long long) stat_info.input_req,
480 (unsigned long long) stat_info.output_req,
481 (unsigned long long) stat_info.control_req);
482
483ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
484 (unsigned long long) stat_info.input_mb,
485 (unsigned long long) stat_info.output_mb);
486
487ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
488 (unsigned long long) stat_info.seconds_act);
489
490struct device_attribute *zfcp_sysfs_shost_attrs[] = {
491 &dev_attr_utilization,
492 &dev_attr_requests,
493 &dev_attr_megabytes,
494 &dev_attr_seconds_active,
495 NULL
496};
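The token-pasting macros at the top of the new file are compact but hard to read; expanding the first invocation by hand shows what each one generates, an ordinary show routine plus its struct device_attribute (a mechanical expansion of ZFCP_DEFINE_ATTR and ZFCP_DEV_ATTR, shown only for readability):

static ssize_t zfcp_sysfs_adapter_status_show(struct device *dev,
					      struct device_attribute *at,
					      char *buf)
{
	struct zfcp_adapter *adapter = dev_get_drvdata(dev);

	return sprintf(buf, "0x%08x\n", atomic_read(&adapter->status));
}
static struct device_attribute dev_attr_adapter_status =
	__ATTR(status, S_IRUGO, zfcp_sysfs_adapter_status_show, NULL);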
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
deleted file mode 100644
index ccbba4dd3a77..000000000000
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries.
4 *
5 * (C) Copyright IBM Corp. 2002, 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "zfcp_ext.h"
23
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
26/**
27 * ZFCP_DEFINE_ADAPTER_ATTR
28 * @_name: name of show attribute
29 * @_format: format string
30 * @_value: value to print
31 *
32 * Generates attributes for an adapter.
33 */
34#define ZFCP_DEFINE_ADAPTER_ATTR(_name, _format, _value) \
35static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct device_attribute *attr, \
36 char *buf) \
37{ \
38 struct zfcp_adapter *adapter; \
39 \
40 adapter = dev_get_drvdata(dev); \
41 return sprintf(buf, _format, _value); \
42} \
43 \
44static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
45
46ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
47ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
48ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
49ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
50ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
51ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
52ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
53 adapter->hardware_version);
54ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
55 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
56
57/**
58 * zfcp_sysfs_port_add_store - add a port to sysfs tree
59 * @dev: pointer to belonging device
60 * @buf: pointer to input buffer
61 * @count: number of bytes in buffer
62 *
63 * Store function of the "port_add" attribute of an adapter.
64 */
65static ssize_t
66zfcp_sysfs_port_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
67{
68 wwn_t wwpn;
69 char *endp;
70 struct zfcp_adapter *adapter;
71 struct zfcp_port *port;
72 int retval = -EINVAL;
73
74 down(&zfcp_data.config_sema);
75
76 adapter = dev_get_drvdata(dev);
77 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
78 retval = -EBUSY;
79 goto out;
80 }
81
82 wwpn = simple_strtoull(buf, &endp, 0);
83 if ((endp + 1) < (buf + count))
84 goto out;
85
86 port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
87 if (!port)
88 goto out;
89
90 retval = 0;
91
92 zfcp_erp_port_reopen(port, 0, 91, NULL);
93 zfcp_erp_wait(port->adapter);
94 zfcp_port_put(port);
95 out:
96 up(&zfcp_data.config_sema);
97 return retval ? retval : (ssize_t) count;
98}
99
100static DEVICE_ATTR(port_add, S_IWUSR, NULL, zfcp_sysfs_port_add_store);
101
102/**
103 * zfcp_sysfs_port_remove_store - remove a port from sysfs tree
104 * @dev: pointer to belonging device
105 * @buf: pointer to input buffer
106 * @count: number of bytes in buffer
107 *
108 * Store function of the "port_remove" attribute of an adapter.
109 */
110static ssize_t
111zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
112{
113 struct zfcp_adapter *adapter;
114 struct zfcp_port *port;
115 wwn_t wwpn;
116 char *endp;
117 int retval = 0;
118
119 down(&zfcp_data.config_sema);
120
121 adapter = dev_get_drvdata(dev);
122 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
123 retval = -EBUSY;
124 goto out;
125 }
126
127 wwpn = simple_strtoull(buf, &endp, 0);
128 if ((endp + 1) < (buf + count)) {
129 retval = -EINVAL;
130 goto out;
131 }
132
133 write_lock_irq(&zfcp_data.config_lock);
134 port = zfcp_get_port_by_wwpn(adapter, wwpn);
135 if (port && (atomic_read(&port->refcount) == 0)) {
136 zfcp_port_get(port);
137 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
138 list_move(&port->list, &adapter->port_remove_lh);
139 }
140 else {
141 port = NULL;
142 }
143 write_unlock_irq(&zfcp_data.config_lock);
144
145 if (!port) {
146 retval = -ENXIO;
147 goto out;
148 }
149
150 zfcp_erp_port_shutdown(port, 0, 92, NULL);
151 zfcp_erp_wait(adapter);
152 zfcp_port_put(port);
153 zfcp_port_dequeue(port);
154 out:
155 up(&zfcp_data.config_sema);
156 return retval ? retval : (ssize_t) count;
157}
158
159static DEVICE_ATTR(port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store);
160
161/**
162 * zfcp_sysfs_adapter_failed_store - failed state of adapter
163 * @dev: pointer to belonging device
164 * @buf: pointer to input buffer
165 * @count: number of bytes in buffer
166 *
167 * Store function of the "failed" attribute of an adapter.
168 * If a "0" gets written to "failed", error recovery will be
169 * started for the belonging adapter.
170 */
171static ssize_t
172zfcp_sysfs_adapter_failed_store(struct device *dev, struct device_attribute *attr,
173 const char *buf, size_t count)
174{
175 struct zfcp_adapter *adapter;
176 unsigned int val;
177 char *endp;
178 int retval = 0;
179
180 down(&zfcp_data.config_sema);
181
182 adapter = dev_get_drvdata(dev);
183 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
184 retval = -EBUSY;
185 goto out;
186 }
187
188 val = simple_strtoul(buf, &endp, 0);
189 if (((endp + 1) < (buf + count)) || (val != 0)) {
190 retval = -EINVAL;
191 goto out;
192 }
193
194 zfcp_erp_modify_adapter_status(adapter, 44, NULL,
195 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
196 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 93,
197 NULL);
198 zfcp_erp_wait(adapter);
199 out:
200 up(&zfcp_data.config_sema);
201 return retval ? retval : (ssize_t) count;
202}
203
204/**
205 * zfcp_sysfs_adapter_failed_show - failed state of adapter
206 * @dev: pointer to belonging device
207 * @buf: pointer to input buffer
208 *
209 * Show function of "failed" attribute of adapter. Will be
210 * "0" if adapter is working, otherwise "1".
211 */
212static ssize_t
213zfcp_sysfs_adapter_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
214{
215 struct zfcp_adapter *adapter;
216
217 adapter = dev_get_drvdata(dev);
218 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status))
219 return sprintf(buf, "1\n");
220 else
221 return sprintf(buf, "0\n");
222}
223
224static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_adapter_failed_show,
225 zfcp_sysfs_adapter_failed_store);
226
227static struct attribute *zfcp_adapter_attrs[] = {
228 &dev_attr_failed.attr,
229 &dev_attr_in_recovery.attr,
230 &dev_attr_port_remove.attr,
231 &dev_attr_port_add.attr,
232 &dev_attr_peer_wwnn.attr,
233 &dev_attr_peer_wwpn.attr,
234 &dev_attr_peer_d_id.attr,
235 &dev_attr_card_version.attr,
236 &dev_attr_lic_version.attr,
237 &dev_attr_status.attr,
238 &dev_attr_hardware_version.attr,
239 NULL
240};
241
242static struct attribute_group zfcp_adapter_attr_group = {
243 .attrs = zfcp_adapter_attrs,
244};
245
246/**
247 * zfcp_sysfs_create_adapter_files - create sysfs adapter files
248 * @dev: pointer to belonging device
249 *
250 * Create all attributes of the sysfs representation of an adapter.
251 */
252int
253zfcp_sysfs_adapter_create_files(struct device *dev)
254{
255 return sysfs_create_group(&dev->kobj, &zfcp_adapter_attr_group);
256}
257
258/**
259 * zfcp_sysfs_remove_adapter_files - remove sysfs adapter files
260 * @dev: pointer to belonging device
261 *
262 * Remove all attributes of the sysfs representation of an adapter.
263 */
264void
265zfcp_sysfs_adapter_remove_files(struct device *dev)
266{
267 sysfs_remove_group(&dev->kobj, &zfcp_adapter_attr_group);
268}
269
270#undef ZFCP_LOG_AREA
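Note the input-parsing change between this deleted file and its replacement: the old store functions paired simple_strtoull() with endp arithmetic that merely bounded how many bytes may follow the number (at most one, of any value), while the new code uses strict_strtoul()/strict_strtoull(), which accept only the value plus an optional trailing newline. A userspace analogue of the strict behaviour, with stdlib strtoull standing in for the kernel helper:

#include <stdio.h>
#include <stdlib.h>

static int strict_parse(const char *buf, unsigned long long *val)
{
	char *endp;

	*val = strtoull(buf, &endp, 0);
	if (endp == buf)
		return -1;		/* no digits at all */
	if (*endp == '\n')
		endp++;			/* allow one trailing newline */
	return *endp ? -1 : 0;		/* reject trailing garbage */
}

int main(void)
{
	unsigned long long v;

	printf("%d\n", strict_parse("0x500507630300c562\n", &v)); /* 0  */
	printf("%d\n", strict_parse("0x50 junk\n", &v));	   /* -1 */
	return 0;
}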
diff --git a/drivers/s390/scsi/zfcp_sysfs_driver.c b/drivers/s390/scsi/zfcp_sysfs_driver.c
deleted file mode 100644
index 651edd58906a..000000000000
--- a/drivers/s390/scsi/zfcp_sysfs_driver.c
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries.
4 *
5 * (C) Copyright IBM Corp. 2002, 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "zfcp_ext.h"
23
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
26/**
27 * ZFCP_DEFINE_DRIVER_ATTR - define a sysfs loglevel attribute
28 * @_name: name of attribute
29 * @_define: name of ZFCP loglevel define
30 *
31 * Generates show and store functions for a loglevel attribute of the zfcp driver.
32 */
33#define ZFCP_DEFINE_DRIVER_ATTR(_name, _define) \
34static ssize_t zfcp_sysfs_loglevel_##_name##_store(struct device_driver *drv, \
35 const char *buf, \
36 size_t count) \
37{ \
38 unsigned int loglevel; \
39 unsigned int new_loglevel; \
40 char *endp; \
41 \
42 new_loglevel = simple_strtoul(buf, &endp, 0); \
43 if ((endp + 1) < (buf + count)) \
44 return -EINVAL; \
45 if (new_loglevel > 3) \
46 return -EINVAL; \
47 down(&zfcp_data.config_sema); \
48 loglevel = atomic_read(&zfcp_data.loglevel); \
49 loglevel &= ~((unsigned int) 0xf << (ZFCP_LOG_AREA_##_define << 2)); \
50 loglevel |= new_loglevel << (ZFCP_LOG_AREA_##_define << 2); \
51 atomic_set(&zfcp_data.loglevel, loglevel); \
52 up(&zfcp_data.config_sema); \
53 return count; \
54} \
55 \
56static ssize_t zfcp_sysfs_loglevel_##_name##_show(struct device_driver *dev, \
57 char *buf) \
58{ \
59	return sprintf(buf, "%d\n", (unsigned int) \
60 ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_##_define)); \
61} \
62 \
63static DRIVER_ATTR(loglevel_##_name, S_IWUSR | S_IRUGO, \
64 zfcp_sysfs_loglevel_##_name##_show, \
65 zfcp_sysfs_loglevel_##_name##_store);
66
67ZFCP_DEFINE_DRIVER_ATTR(other, OTHER);
68ZFCP_DEFINE_DRIVER_ATTR(scsi, SCSI);
69ZFCP_DEFINE_DRIVER_ATTR(fsf, FSF);
70ZFCP_DEFINE_DRIVER_ATTR(config, CONFIG);
71ZFCP_DEFINE_DRIVER_ATTR(cio, CIO);
72ZFCP_DEFINE_DRIVER_ATTR(qdio, QDIO);
73ZFCP_DEFINE_DRIVER_ATTR(erp, ERP);
74ZFCP_DEFINE_DRIVER_ATTR(fc, FC);
75
76static ssize_t zfcp_sysfs_version_show(struct device_driver *dev,
77 char *buf)
78{
79 return sprintf(buf, "%s\n", zfcp_data.driver_version);
80}
81
82static DRIVER_ATTR(version, S_IRUGO, zfcp_sysfs_version_show, NULL);
83
84static struct attribute *zfcp_driver_attrs[] = {
85 &driver_attr_loglevel_other.attr,
86 &driver_attr_loglevel_scsi.attr,
87 &driver_attr_loglevel_fsf.attr,
88 &driver_attr_loglevel_config.attr,
89 &driver_attr_loglevel_cio.attr,
90 &driver_attr_loglevel_qdio.attr,
91 &driver_attr_loglevel_erp.attr,
92 &driver_attr_loglevel_fc.attr,
93 &driver_attr_version.attr,
94 NULL
95};
96
97static struct attribute_group zfcp_driver_attr_group = {
98 .attrs = zfcp_driver_attrs,
99};
100
101struct attribute_group *zfcp_driver_attr_groups[] = {
102 &zfcp_driver_attr_group,
103 NULL,
104};
105
106#undef ZFCP_LOG_AREA
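The shifting in the macro above packs one 4-bit level per log area into a single 32-bit word: area N occupies bits 4N..4N+3, so (area << 2) is the bit offset and 0xf the per-area mask. The same arithmetic as a standalone C sketch:

#include <stdio.h>

static unsigned int set_level(unsigned int word, unsigned int area,
			      unsigned int level)
{
	word &= ~(0xfu << (area << 2));	/* clear the area's nibble */
	word |= level << (area << 2);	/* install the new level */
	return word;
}

static unsigned int get_level(unsigned int word, unsigned int area)
{
	return (word >> (area << 2)) & 0xf;
}

int main(void)
{
	unsigned int w = 0;

	w = set_level(w, 3, 2);			/* area 3 -> level 2 */
	printf("area 3: %u\n", get_level(w, 3));	/* prints 2 */
	return 0;
}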
diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c
deleted file mode 100644
index 703c1b5cb602..000000000000
--- a/drivers/s390/scsi/zfcp_sysfs_port.c
+++ /dev/null
@@ -1,295 +0,0 @@
1/*
2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries.
4 *
5 * (C) Copyright IBM Corp. 2002, 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "zfcp_ext.h"
23
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
26/**
27 * zfcp_sysfs_port_release - gets called when a struct device port is released
28 * @dev: pointer to belonging device
29 */
30void
31zfcp_sysfs_port_release(struct device *dev)
32{
33 kfree(dev);
34}
35
36/**
37 * ZFCP_DEFINE_PORT_ATTR
38 * @_name: name of show attribute
39 * @_format: format string
40 * @_value: value to print
41 *
42 * Generates attributes for a port.
43 */
44#define ZFCP_DEFINE_PORT_ATTR(_name, _format, _value) \
45static ssize_t zfcp_sysfs_port_##_name##_show(struct device *dev, struct device_attribute *attr, \
46 char *buf) \
47{ \
48 struct zfcp_port *port; \
49 \
50 port = dev_get_drvdata(dev); \
51 return sprintf(buf, _format, _value); \
52} \
53 \
54static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_port_##_name##_show, NULL);
55
56ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status));
57ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask
58 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status));
59ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask
60 (ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status));
61
62/**
63 * zfcp_sysfs_unit_add_store - add a unit to sysfs tree
64 * @dev: pointer to belonging device
65 * @buf: pointer to input buffer
66 * @count: number of bytes in buffer
67 *
68 * Store function of the "unit_add" attribute of a port.
69 */
70static ssize_t
71zfcp_sysfs_unit_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
72{
73 fcp_lun_t fcp_lun;
74 char *endp;
75 struct zfcp_port *port;
76 struct zfcp_unit *unit;
77 int retval = -EINVAL;
78
79 down(&zfcp_data.config_sema);
80
81 port = dev_get_drvdata(dev);
82 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
83 retval = -EBUSY;
84 goto out;
85 }
86
87 fcp_lun = simple_strtoull(buf, &endp, 0);
88 if ((endp + 1) < (buf + count))
89 goto out;
90
91 unit = zfcp_unit_enqueue(port, fcp_lun);
92 if (!unit)
93 goto out;
94
95 retval = 0;
96
97 zfcp_erp_unit_reopen(unit, 0, 94, NULL);
98 zfcp_erp_wait(unit->port->adapter);
99 zfcp_unit_put(unit);
100 out:
101 up(&zfcp_data.config_sema);
102 return retval ? retval : (ssize_t) count;
103}
104
105static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
106
107/**
108 * zfcp_sysfs_unit_remove_store - remove a unit from sysfs tree
109 * @dev: pointer to belonging device
110 * @buf: pointer to input buffer
111 * @count: number of bytes in buffer
112 */
113static ssize_t
114zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
115{
116 struct zfcp_port *port;
117 struct zfcp_unit *unit;
118 fcp_lun_t fcp_lun;
119 char *endp;
120 int retval = 0;
121
122 down(&zfcp_data.config_sema);
123
124 port = dev_get_drvdata(dev);
125 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
126 retval = -EBUSY;
127 goto out;
128 }
129
130 fcp_lun = simple_strtoull(buf, &endp, 0);
131 if ((endp + 1) < (buf + count)) {
132 retval = -EINVAL;
133 goto out;
134 }
135
136 write_lock_irq(&zfcp_data.config_lock);
137 unit = zfcp_get_unit_by_lun(port, fcp_lun);
138 if (unit && (atomic_read(&unit->refcount) == 0)) {
139 zfcp_unit_get(unit);
140 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
141 list_move(&unit->list, &port->unit_remove_lh);
142 }
143 else {
144 unit = NULL;
145 }
146 write_unlock_irq(&zfcp_data.config_lock);
147
148 if (!unit) {
149 retval = -ENXIO;
150 goto out;
151 }
152
153 zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
154 zfcp_erp_wait(unit->port->adapter);
155 zfcp_unit_put(unit);
156 zfcp_unit_dequeue(unit);
157 out:
158 up(&zfcp_data.config_sema);
159 return retval ? retval : (ssize_t) count;
160}
161
162static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
163
164/**
165 * zfcp_sysfs_port_failed_store - failed state of port
166 * @dev: pointer to belonging device
167 * @buf: pointer to input buffer
168 * @count: number of bytes in buffer
169 *
170 * Store function of the "failed" attribute of a port.
171 * If a "0" gets written to "failed", error recovery will be
172 * started for the belonging port.
173 */
174static ssize_t
175zfcp_sysfs_port_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
176{
177 struct zfcp_port *port;
178 unsigned int val;
179 char *endp;
180 int retval = 0;
181
182 down(&zfcp_data.config_sema);
183
184 port = dev_get_drvdata(dev);
185 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
186 retval = -EBUSY;
187 goto out;
188 }
189
190 val = simple_strtoul(buf, &endp, 0);
191 if (((endp + 1) < (buf + count)) || (val != 0)) {
192 retval = -EINVAL;
193 goto out;
194 }
195
196 zfcp_erp_modify_port_status(port, 45, NULL,
197 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
198 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 96, NULL);
199 zfcp_erp_wait(port->adapter);
200 out:
201 up(&zfcp_data.config_sema);
202 return retval ? retval : (ssize_t) count;
203}
204
205/**
206 * zfcp_sysfs_port_failed_show - failed state of port
207 * @dev: pointer to belonging device
208 * @buf: pointer to input buffer
209 *
210 * Show function of "failed" attribute of port. Will be
211 * "0" if port is working, otherwise "1".
212 */
213static ssize_t
214zfcp_sysfs_port_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
215{
216 struct zfcp_port *port;
217
218 port = dev_get_drvdata(dev);
219 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status))
220 return sprintf(buf, "1\n");
221 else
222 return sprintf(buf, "0\n");
223}
224
225static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_port_failed_show,
226 zfcp_sysfs_port_failed_store);
227
228/**
229 * zfcp_port_common_attrs
233 * sysfs attributes that are common to all kinds of FC ports.
231 */
232static struct attribute *zfcp_port_common_attrs[] = {
233 &dev_attr_failed.attr,
234 &dev_attr_in_recovery.attr,
235 &dev_attr_status.attr,
236 &dev_attr_access_denied.attr,
237 NULL
238};
239
240static struct attribute_group zfcp_port_common_attr_group = {
241 .attrs = zfcp_port_common_attrs,
242};
243
244/**
245 * zfcp_port_no_ns_attrs
246 * sysfs attributes not to be used for nameserver ports.
247 */
248static struct attribute *zfcp_port_no_ns_attrs[] = {
249 &dev_attr_unit_add.attr,
250 &dev_attr_unit_remove.attr,
251 NULL
252};
253
254static struct attribute_group zfcp_port_no_ns_attr_group = {
255 .attrs = zfcp_port_no_ns_attrs,
256};
257
258/**
259 * zfcp_sysfs_port_create_files - create sysfs port files
260 * @dev: pointer to belonging device
261 *
262 * Create all attributes of the sysfs representation of a port.
263 */
264int
265zfcp_sysfs_port_create_files(struct device *dev, u32 flags)
266{
267 int retval;
268
269 retval = sysfs_create_group(&dev->kobj, &zfcp_port_common_attr_group);
270
271 if ((flags & ZFCP_STATUS_PORT_WKA) || retval)
272 return retval;
273
274 retval = sysfs_create_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
275 if (retval)
276 sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
277
278 return retval;
279}
280
281/**
282 * zfcp_sysfs_port_remove_files - remove sysfs port files
283 * @dev: pointer to belonging device
284 *
285 * Remove all attributes of the sysfs representation of a port.
286 */
287void
288zfcp_sysfs_port_remove_files(struct device *dev, u32 flags)
289{
290 sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
291 if (!(flags & ZFCP_STATUS_PORT_WKA))
292 sysfs_remove_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
293}
294
295#undef ZFCP_LOG_AREA
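zfcp_sysfs_port_create_files() above encodes a small rollback pattern: every port gets the common group, well-known-address (WKA) nameserver ports stop there, and a failure creating the second group unwinds the first. The same control flow reduced to plain C, with create_group()/remove_group() as hypothetical stand-ins for sysfs_create_group()/sysfs_remove_group():

#include <stdio.h>

#define PORT_WKA 0x1u	/* stand-in for ZFCP_STATUS_PORT_WKA */

static int create_group(const char *name) { printf("create %s\n", name); return 0; }
static void remove_group(const char *name) { printf("remove %s\n", name); }

static int port_create_files(unsigned int flags)
{
	int retval = create_group("common");

	/* WKA ports only carry the common attributes */
	if ((flags & PORT_WKA) || retval)
		return retval;

	retval = create_group("no_ns");
	if (retval)
		remove_group("common");	/* roll back on failure */
	return retval;
}

int main(void)
{
	return port_create_files(PORT_WKA);
}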
diff --git a/drivers/s390/scsi/zfcp_sysfs_unit.c b/drivers/s390/scsi/zfcp_sysfs_unit.c
deleted file mode 100644
index 80fb2c2cf48a..000000000000
--- a/drivers/s390/scsi/zfcp_sysfs_unit.c
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries.
4 *
5 * (C) Copyright IBM Corp. 2002, 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "zfcp_ext.h"
23
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
26/**
27 * zfcp_sysfs_unit_release - gets called when a struct device unit is released
28 * @dev: pointer to belonging device
29 */
30void
31zfcp_sysfs_unit_release(struct device *dev)
32{
33 kfree(dev);
34}
35
36/**
37 * ZFCP_DEFINE_UNIT_ATTR
38 * @_name: name of show attribute
39 * @_format: format string
40 * @_value: value to print
41 *
42 * Generates an attribute for a unit.
43 */
44#define ZFCP_DEFINE_UNIT_ATTR(_name, _format, _value) \
45static ssize_t zfcp_sysfs_unit_##_name##_show(struct device *dev, struct device_attribute *attr, \
46 char *buf) \
47{ \
48 struct zfcp_unit *unit; \
49 \
50 unit = dev_get_drvdata(dev); \
51 return sprintf(buf, _format, _value); \
52} \
53 \
54static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_unit_##_name##_show, NULL);
55
56ZFCP_DEFINE_UNIT_ATTR(status, "0x%08x\n", atomic_read(&unit->status));
57ZFCP_DEFINE_UNIT_ATTR(in_recovery, "%d\n", atomic_test_mask
58 (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status));
59ZFCP_DEFINE_UNIT_ATTR(access_denied, "%d\n", atomic_test_mask
60 (ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status));
61ZFCP_DEFINE_UNIT_ATTR(access_shared, "%d\n", atomic_test_mask
62 (ZFCP_STATUS_UNIT_SHARED, &unit->status));
63ZFCP_DEFINE_UNIT_ATTR(access_readonly, "%d\n", atomic_test_mask
64 (ZFCP_STATUS_UNIT_READONLY, &unit->status));
65
66/**
67 * zfcp_sysfs_unit_failed_store - failed state of unit
68 * @dev: pointer to belonging device
69 * @buf: pointer to input buffer
70 * @count: number of bytes in buffer
71 *
72 * Store function of the "failed" attribute of a unit.
73 * If a "0" gets written to "failed", error recovery will be
74 * started for the belonging unit.
75 */
76static ssize_t
77zfcp_sysfs_unit_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
78{
79 struct zfcp_unit *unit;
80 unsigned int val;
81 char *endp;
82 int retval = 0;
83
84 down(&zfcp_data.config_sema);
85 unit = dev_get_drvdata(dev);
86 if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) {
87 retval = -EBUSY;
88 goto out;
89 }
90
91 val = simple_strtoul(buf, &endp, 0);
92 if (((endp + 1) < (buf + count)) || (val != 0)) {
93 retval = -EINVAL;
94 goto out;
95 }
96
97 zfcp_erp_modify_unit_status(unit, 46, NULL,
98 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
99 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, 97, NULL);
100 zfcp_erp_wait(unit->port->adapter);
101 out:
102 up(&zfcp_data.config_sema);
103 return retval ? retval : (ssize_t) count;
104}
105
106/**
107 * zfcp_sysfs_unit_failed_show - failed state of unit
108 * @dev: pointer to belonging device
109 * @buf: pointer to input buffer
110 *
111 * Show function of "failed" attribute of unit. Will be
112 * "0" if unit is working, otherwise "1".
113 */
114static ssize_t
115zfcp_sysfs_unit_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
116{
117 struct zfcp_unit *unit;
118
119 unit = dev_get_drvdata(dev);
120 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status))
121 return sprintf(buf, "1\n");
122 else
123 return sprintf(buf, "0\n");
124}
125
126static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_unit_failed_show,
127 zfcp_sysfs_unit_failed_store);
128
129static struct attribute *zfcp_unit_attrs[] = {
130 &dev_attr_failed.attr,
131 &dev_attr_in_recovery.attr,
132 &dev_attr_status.attr,
133 &dev_attr_access_denied.attr,
134 &dev_attr_access_shared.attr,
135 &dev_attr_access_readonly.attr,
136 NULL
137};
138
139static struct attribute_group zfcp_unit_attr_group = {
140 .attrs = zfcp_unit_attrs,
141};
142
143/**
144 * zfcp_sysfs_create_unit_files - create sysfs unit files
145 * @dev: pointer to belonging device
146 *
147 * Create all attributes of the sysfs representation of a unit.
148 */
149int
150zfcp_sysfs_unit_create_files(struct device *dev)
151{
152 return sysfs_create_group(&dev->kobj, &zfcp_unit_attr_group);
153}
154
155/**
156 * zfcp_sysfs_remove_unit_files - remove sysfs unit files
157 * @dev: pointer to belonging device
158 *
159 * Remove all attributes of the sysfs representation of a unit.
160 */
161void
162zfcp_sysfs_unit_remove_files(struct device *dev)
163{
164 sysfs_remove_group(&dev->kobj, &zfcp_unit_attr_group);
165}
166
167#undef ZFCP_LOG_AREA
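As the kerneldoc for the removed failed attributes says, writing "0" to "failed" starts error recovery for the corresponding adapter, port, or unit; the ZFCP_SYSFS_FAILED macro in the new zfcp_sysfs.c keeps that behaviour. A userspace sketch that triggers unit recovery; the bus ID, WWPN, and LUN in the path are illustrative assumptions:

#include <stdio.h>

int main(void)
{
	/* illustrative path: .../zfcp/<busid>/<wwpn>/<lun>/failed */
	FILE *f = fopen("/sys/bus/ccw/drivers/zfcp/0.0.3d0c/"
			"0x500507630300c562/0x4010403200000000/failed", "w");

	if (!f)
		return 1;
	fputs("0\n", f);	/* "0" requests recovery */
	return fclose(f) ? 1 : 0;
}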